// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/units.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
#include <linux/aer.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
#include "trace.h"

/**
 * DOC: cxl core pci
 *
 * Compute Express Link protocols are layered on top of PCIe. The CXL core
 * provides a set of helpers for CXL interactions which occur via PCIe.
 */

static unsigned short media_ready_timeout = 60;
module_param(media_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(media_ready_timeout, "seconds to wait for media ready");

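/*
 * Context passed to match_add_dports() while walking @bus: @port is the
 * upstream cxl_port being populated, @type filters for Root Ports vs.
 * Switch Downstream Ports, and @error / @count report the outcome of the
 * walk.
 */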
struct cxl_walk_context {
        struct pci_bus *bus;
        struct cxl_port *port;
        int type;
        int error;
        int count;
};

static int match_add_dports(struct pci_dev *pdev, void *data)
{
        struct cxl_walk_context *ctx = data;
        struct cxl_port *port = ctx->port;
        int type = pci_pcie_type(pdev);
        struct cxl_register_map map;
        struct cxl_dport *dport;
        u32 lnkcap, port_num;
        int rc;

        if (pdev->bus != ctx->bus)
                return 0;
        if (!pci_is_pcie(pdev))
                return 0;
        if (type != ctx->type)
                return 0;
        if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
                                  &lnkcap))
                return 0;

        rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
        if (rc)
                dev_dbg(&port->dev, "failed to find component registers\n");

        port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
        dport = devm_cxl_add_dport(port, &pdev->dev, port_num, map.resource);
        if (IS_ERR(dport)) {
                ctx->error = PTR_ERR(dport);
                return PTR_ERR(dport);
        }
        ctx->count++;

        return 0;
}

/**
 * devm_cxl_port_enumerate_dports - enumerate downstream ports of the upstream port
 * @port: cxl_port whose ->uport_dev is the upstream of dports to be enumerated
 *
 * Returns a positive number of dports enumerated or a negative error
 * code.
 */
int devm_cxl_port_enumerate_dports(struct cxl_port *port)
{
        struct pci_bus *bus = cxl_port_to_pci_bus(port);
        struct cxl_walk_context ctx;
        int type;

        if (!bus)
                return -ENXIO;

        if (pci_is_root_bus(bus))
                type = PCI_EXP_TYPE_ROOT_PORT;
        else
                type = PCI_EXP_TYPE_DOWNSTREAM;

        ctx = (struct cxl_walk_context) {
                .port = port,
                .bus = bus,
                .type = type,
        };
        pci_walk_bus(bus, match_add_dports, &ctx);

        if (ctx.count == 0)
                return -ENODEV;
        if (ctx.error)
                return ctx.error;
        return ctx.count;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, "CXL");
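
/*
 * Usage sketch (illustrative only, not the literal probe flow of any one
 * driver): a port driver enumerates its dports during probe and treats a
 * positive return as the number of dports found:
 *
 *	rc = devm_cxl_port_enumerate_dports(port);
 *	if (rc < 0)
 *		return rc;
 */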

static int cxl_dvsec_mem_range_valid(struct cxl_dev_state *cxlds, int id)
{
        struct pci_dev *pdev = to_pci_dev(cxlds->dev);
        int d = cxlds->cxl_dvsec;
        bool valid = false;
        int rc, i;
        u32 temp;

        if (id > CXL_DVSEC_RANGE_MAX)
                return -EINVAL;

        /* Check MEM INFO VALID bit first, give up after 1s */
        i = 1;
        do {
                rc = pci_read_config_dword(pdev,
                                           d + CXL_DVSEC_RANGE_SIZE_LOW(id),
                                           &temp);
                if (rc)
                        return rc;

                valid = FIELD_GET(CXL_DVSEC_MEM_INFO_VALID, temp);
                if (valid)
                        break;
                msleep(1000);
        } while (i--);

        if (!valid) {
                dev_err(&pdev->dev,
                        "Timeout awaiting memory range %d valid after 1s.\n",
                        id);
                return -ETIMEDOUT;
        }

        return 0;
}

static int cxl_dvsec_mem_range_active(struct cxl_dev_state *cxlds, int id)
{
        struct pci_dev *pdev = to_pci_dev(cxlds->dev);
        int d = cxlds->cxl_dvsec;
        bool active = false;
        int rc, i;
        u32 temp;

        if (id > CXL_DVSEC_RANGE_MAX)
                return -EINVAL;

        /* Check MEM ACTIVE bit, up to 60s timeout by default */
        for (i = media_ready_timeout; i; i--) {
                rc = pci_read_config_dword(
                        pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(id), &temp);
                if (rc)
                        return rc;

                active = FIELD_GET(CXL_DVSEC_MEM_ACTIVE, temp);
                if (active)
                        break;
                msleep(1000);
        }

        if (!active) {
                dev_err(&pdev->dev,
                        "timeout awaiting memory active after %d seconds\n",
                        media_ready_timeout);
                return -ETIMEDOUT;
        }

        return 0;
}

/*
 * Wait up to @media_ready_timeout for the device to report memory
 * active.
 */
int cxl_await_media_ready(struct cxl_dev_state *cxlds)
{
        struct pci_dev *pdev = to_pci_dev(cxlds->dev);
        int d = cxlds->cxl_dvsec;
        int rc, i, hdm_count;
        u64 md_status;
        u16 cap;

        rc = pci_read_config_word(pdev,
                                  d + CXL_DVSEC_CAP_OFFSET, &cap);
        if (rc)
                return rc;

        hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
        for (i = 0; i < hdm_count; i++) {
                rc = cxl_dvsec_mem_range_valid(cxlds, i);
                if (rc)
                        return rc;
        }

        for (i = 0; i < hdm_count; i++) {
                rc = cxl_dvsec_mem_range_active(cxlds, i);
                if (rc)
                        return rc;
        }

        md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
        if (!CXLMDEV_READY(md_status))
                return -EIO;

        return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, "CXL");

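/*
 * Set or clear CXL_DVSEC_MEM_ENABLE in the DVSEC Control register.
 * Returns 1 if the requested state was already in effect (no write
 * performed), 0 if the register was updated, or a negative error code if
 * the config access failed.
 */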
static int cxl_set_mem_enable(struct cxl_dev_state *cxlds, u16 val)
{
        struct pci_dev *pdev = to_pci_dev(cxlds->dev);
        int d = cxlds->cxl_dvsec;
        u16 ctrl;
        int rc;

        rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
        if (rc < 0)
                return rc;

        if ((ctrl & CXL_DVSEC_MEM_ENABLE) == val)
                return 1;
        ctrl &= ~CXL_DVSEC_MEM_ENABLE;
        ctrl |= val;

        rc = pci_write_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, ctrl);
        if (rc < 0)
                return rc;

        return 0;
}

static void clear_mem_enable(void *cxlds)
{
        cxl_set_mem_enable(cxlds, 0);
}

static int devm_cxl_enable_mem(struct device *host, struct cxl_dev_state *cxlds)
{
        int rc;

        rc = cxl_set_mem_enable(cxlds, CXL_DVSEC_MEM_ENABLE);
        if (rc < 0)
                return rc;
        if (rc > 0)
                return 0;
        return devm_add_action_or_reset(host, clear_mem_enable, cxlds);
}

/* require dvsec ranges to be covered by a locked platform window */
static int dvsec_range_allowed(struct device *dev, void *arg)
{
        struct range *dev_range = arg;
        struct cxl_decoder *cxld;

        if (!is_root_decoder(dev))
                return 0;

        cxld = to_cxl_decoder(dev);

        if (!(cxld->flags & CXL_DECODER_F_RAM))
                return 0;

        return range_contains(&cxld->hpa_range, dev_range);
}

static void disable_hdm(void *_cxlhdm)
{
        u32 global_ctrl;
        struct cxl_hdm *cxlhdm = _cxlhdm;
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;

        global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
        writel(global_ctrl & ~CXL_HDM_DECODER_ENABLE,
               hdm + CXL_HDM_DECODER_CTRL_OFFSET);
}

static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
{
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        u32 global_ctrl;

        global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
        writel(global_ctrl | CXL_HDM_DECODER_ENABLE,
               hdm + CXL_HDM_DECODER_CTRL_OFFSET);

        return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
}

int cxl_dvsec_rr_decode(struct device *dev, struct cxl_port *port,
                        struct cxl_endpoint_dvsec_info *info)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
        int hdm_count, rc, i, ranges = 0;
        int d = cxlds->cxl_dvsec;
        u16 cap, ctrl;

        if (!d) {
                dev_dbg(dev, "No DVSEC Capability\n");
                return -ENXIO;
        }

        rc = pci_read_config_word(pdev, d + CXL_DVSEC_CAP_OFFSET, &cap);
        if (rc)
                return rc;

        if (!(cap & CXL_DVSEC_MEM_CAPABLE)) {
                dev_dbg(dev, "Not MEM Capable\n");
                return -ENXIO;
        }

        /*
         * It is not allowed by spec for MEM.capable to be set and have 0 legacy
         * HDM decoders (values > 2 are also undefined as of CXL 2.0). As this
         * driver is for a spec defined class code which must be CXL.mem
         * capable, there is no point in continuing to enable CXL.mem.
         */
        hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
        if (!hdm_count || hdm_count > 2)
                return -EINVAL;

        /*
         * The current DVSEC values are moot if the memory capability is
         * disabled, and they will remain moot after the HDM Decoder
         * capability is enabled.
         */
        rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl);
        if (rc)
                return rc;

        info->mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl);
        if (!info->mem_enabled)
                return 0;

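        /*
         * Each range register is split across four config DWs: size and
         * base each have HIGH (upper 32 bits) and LOW DWs, where the
         * low-order bits of the LOW DWs carry status/control flags and are
         * masked off below.
         */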
        for (i = 0; i < hdm_count; i++) {
                u64 base, size;
                u32 temp;

                rc = cxl_dvsec_mem_range_valid(cxlds, i);
                if (rc)
                        return rc;

                rc = pci_read_config_dword(
                        pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp);
                if (rc)
                        return rc;

                size = (u64)temp << 32;

                rc = pci_read_config_dword(
                        pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(i), &temp);
                if (rc)
                        return rc;

                size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK;
                if (!size)
                        continue;

                rc = pci_read_config_dword(
                        pdev, d + CXL_DVSEC_RANGE_BASE_HIGH(i), &temp);
                if (rc)
                        return rc;

                base = (u64)temp << 32;

                rc = pci_read_config_dword(
                        pdev, d + CXL_DVSEC_RANGE_BASE_LOW(i), &temp);
                if (rc)
                        return rc;

                base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK;

                info->dvsec_range[ranges++] = (struct range) {
                        .start = base,
                        .end = base + size - 1
                };
        }

        info->ranges = ranges;

        return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dvsec_rr_decode, "CXL");

/**
 * cxl_hdm_decode_init() - Setup HDM decoding for the endpoint
 * @cxlds: Device state
 * @cxlhdm: Mapped HDM decoder Capability
 * @info: Cached DVSEC range registers info
 *
 * Try to enable the endpoint's HDM Decoder Capability
 */
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
                        struct cxl_endpoint_dvsec_info *info)
{
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        struct cxl_port *port = cxlhdm->port;
        struct device *dev = cxlds->dev;
        struct cxl_port *root;
        int i, rc, allowed;
        u32 global_ctrl = 0;

        if (hdm)
                global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);

        /*
         * If the HDM Decoder Capability is already enabled then assume
         * that some other agent like platform firmware set it up.
         */
        if (global_ctrl & CXL_HDM_DECODER_ENABLE || (!hdm && info->mem_enabled))
                return devm_cxl_enable_mem(&port->dev, cxlds);
        else if (!hdm)
                return -ENODEV;

        root = to_cxl_port(port->dev.parent);
        while (!is_cxl_root(root) && is_cxl_port(root->dev.parent))
                root = to_cxl_port(root->dev.parent);
        if (!is_cxl_root(root)) {
                dev_err(dev, "Failed to acquire root port for HDM enable\n");
                return -ENODEV;
        }

        if (!info->mem_enabled) {
                rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
                if (rc)
                        return rc;

                return devm_cxl_enable_mem(&port->dev, cxlds);
        }

        for (i = 0, allowed = 0; i < info->ranges; i++) {
                struct device *cxld_dev;

                cxld_dev = device_find_child(&root->dev, &info->dvsec_range[i],
                                             dvsec_range_allowed);
                if (!cxld_dev) {
                        dev_dbg(dev, "DVSEC Range%d denied by platform\n", i);
                        continue;
                }
                dev_dbg(dev, "DVSEC Range%d allowed by platform\n", i);
                put_device(cxld_dev);
                allowed++;
        }

        if (!allowed) {
                dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
                return -ENXIO;
        }

        /*
         * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base
         * [High,Low] when HDM operation is enabled the range register values
         * are ignored by the device, but the spec also recommends matching the
         * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges
         * are expected even though Linux does not require or maintain that
         * match. If at least one DVSEC range is enabled and allowed, skip HDM
         * Decoder Capability Enable.
         */
        return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, "CXL");

#define CXL_DOE_TABLE_ACCESS_REQ_CODE		0x000000ff
#define CXL_DOE_TABLE_ACCESS_REQ_CODE_READ	0
#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE		0x0000ff00
#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA	0
#define CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE	0xffff0000
#define CXL_DOE_TABLE_ACCESS_LAST_ENTRY		0xffff
#define CXL_DOE_PROTOCOL_TABLE_ACCESS 2
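
/*
 * A CDAT read request is a single DOE DW: bits [7:0] carry the request
 * code (0 == read), bits [15:8] the table type (0 == CDAT), and bits
 * [31:16] the entry handle of the structure to fetch, as composed by
 * CDAT_DOE_REQ() below.
 */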
#define CDAT_DOE_REQ(entry_handle) cpu_to_le32				\
	(FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE,			\
		    CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) |		\
	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE,			\
		    CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA) |		\
	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, (entry_handle)))

static int cxl_cdat_get_length(struct device *dev,
                               struct pci_doe_mb *doe_mb,
                               size_t *length)
{
        __le32 request = CDAT_DOE_REQ(0);
        __le32 response[2];
        int rc;

        rc = pci_doe(doe_mb, PCI_VENDOR_ID_CXL,
                     CXL_DOE_PROTOCOL_TABLE_ACCESS,
                     &request, sizeof(request),
                     &response, sizeof(response));
        if (rc < 0) {
                dev_err(dev, "DOE failed: %d", rc);
                return rc;
        }
        if (rc < sizeof(response))
                return -EIO;

        *length = le32_to_cpu(response[1]);
        dev_dbg(dev, "CDAT length %zu\n", *length);

        return 0;
}

static int cxl_cdat_read_table(struct device *dev,
                               struct pci_doe_mb *doe_mb,
                               struct cdat_doe_rsp *rsp, size_t *length)
{
        size_t received, remaining = *length;
        unsigned int entry_handle = 0;
        union cdat_data *data;
        __le32 saved_dw = 0;

        do {
                __le32 request = CDAT_DOE_REQ(entry_handle);
                int rc;

                rc = pci_doe(doe_mb, PCI_VENDOR_ID_CXL,
                             CXL_DOE_PROTOCOL_TABLE_ACCESS,
                             &request, sizeof(request),
                             rsp, sizeof(*rsp) + remaining);
                if (rc < 0) {
                        dev_err(dev, "DOE failed: %d", rc);
                        return rc;
                }

                if (rc < sizeof(*rsp))
                        return -EIO;

                data = (union cdat_data *)rsp->data;
                received = rc - sizeof(*rsp);

                if (entry_handle == 0) {
                        if (received != sizeof(data->header))
                                return -EIO;
                } else {
                        if (received < sizeof(data->entry) ||
                            received != le16_to_cpu(data->entry.length))
                                return -EIO;
                }

                /* Get the CXL table access header entry handle */
                entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
                                         le32_to_cpu(rsp->doe_header));

                /*
                 * Table Access Response Header overwrote the last DW of
                 * previous entry, so restore that DW
                 */
                rsp->doe_header = saved_dw;
                remaining -= received;
                rsp = (void *)rsp + received;
                saved_dw = rsp->doe_header;
        } while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);

        /* Length in CDAT header may exceed concatenation of CDAT entries */
        *length -= remaining;

        return 0;
}

static unsigned char cdat_checksum(void *buf, size_t size)
{
        unsigned char sum, *data = buf;
        size_t i;

        for (sum = 0, i = 0; i < size; i++)
                sum += data[i];
        return sum;
}

/**
 * read_cdat_data - Read the CDAT data on this port
 * @port: Port to read data from
 *
 * This call will sleep waiting for responses from the DOE mailbox.
 */
void read_cdat_data(struct cxl_port *port)
{
        struct device *uport = port->uport_dev;
        struct device *dev = &port->dev;
        struct pci_doe_mb *doe_mb;
        struct pci_dev *pdev = NULL;
        struct cxl_memdev *cxlmd;
        struct cdat_doe_rsp *buf;
        size_t table_length, length;
        int rc;

        if (is_cxl_memdev(uport)) {
                struct device *host;

                cxlmd = to_cxl_memdev(uport);
                host = cxlmd->dev.parent;
                if (dev_is_pci(host))
                        pdev = to_pci_dev(host);
        } else if (dev_is_pci(uport)) {
                pdev = to_pci_dev(uport);
        }

        if (!pdev)
                return;

        doe_mb = pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_CXL,
                                      CXL_DOE_PROTOCOL_TABLE_ACCESS);
        if (!doe_mb) {
                dev_dbg(dev, "No CDAT mailbox\n");
                return;
        }

        port->cdat_available = true;

        if (cxl_cdat_get_length(dev, doe_mb, &length)) {
                dev_dbg(dev, "No CDAT length\n");
                return;
        }

        /*
         * The beginning of the CDAT buffer needs space for an additional 4
         * bytes for the DOE header. Table data starts afterwards.
         */
        buf = devm_kzalloc(dev, sizeof(*buf) + length, GFP_KERNEL);
        if (!buf)
                goto err;

        table_length = length;

        rc = cxl_cdat_read_table(dev, doe_mb, buf, &length);
        if (rc)
                goto err;

        if (table_length != length)
                dev_warn(dev, "Malformed CDAT table length (%zu:%zu), discarding trailing data\n",
                         table_length, length);

        if (cdat_checksum(buf->data, length))
                goto err;

        port->cdat.table = buf->data;
        port->cdat.length = length;

        return;
err:
        /* Don't leave table data allocated on error */
        devm_kfree(dev, buf);
        dev_err(dev, "Failed to read/validate CDAT.\n");
}
EXPORT_SYMBOL_NS_GPL(read_cdat_data, "CXL");

static void __cxl_handle_cor_ras(struct cxl_dev_state *cxlds,
                                 void __iomem *ras_base)
{
        void __iomem *addr;
        u32 status;

        if (!ras_base)
                return;

        addr = ras_base + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
        status = readl(addr);
        if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) {
                writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr);
                trace_cxl_aer_correctable_error(cxlds->cxlmd, status);
        }
}

static void cxl_handle_endpoint_cor_ras(struct cxl_dev_state *cxlds)
{
        return __cxl_handle_cor_ras(cxlds, cxlds->regs.ras);
}

/* CXL spec rev3.0 8.2.4.16.1 */
static void header_log_copy(void __iomem *ras_base, u32 *log)
{
        void __iomem *addr;
        u32 *log_addr;
        int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32);

        addr = ras_base + CXL_RAS_HEADER_LOG_OFFSET;
        log_addr = log;

        for (i = 0; i < log_u32_size; i++) {
                *log_addr = readl(addr);
                log_addr++;
                addr += sizeof(u32);
        }
}

/*
 * Log the state of the RAS status registers and prepare them to log the
 * next error status. Return 1 if reset needed.
 */
static bool __cxl_handle_ras(struct cxl_dev_state *cxlds,
                             void __iomem *ras_base)
{
        u32 hl[CXL_HEADERLOG_SIZE_U32];
        void __iomem *addr;
        u32 status;
        u32 fe;

        if (!ras_base)
                return false;

        addr = ras_base + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
        status = readl(addr);
        if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK))
                return false;

        /* If multiple errors, log header points to first error from ctrl reg */
        if (hweight32(status) > 1) {
                void __iomem *rcc_addr =
                        ras_base + CXL_RAS_CAP_CONTROL_OFFSET;

                fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
                                   readl(rcc_addr)));
        } else {
                fe = status;
        }

        header_log_copy(ras_base, hl);
        trace_cxl_aer_uncorrectable_error(cxlds->cxlmd, status, fe, hl);
        writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr);

        return true;
}

static bool cxl_handle_endpoint_ras(struct cxl_dev_state *cxlds)
{
        return __cxl_handle_ras(cxlds, cxlds->regs.ras);
}

#ifdef CONFIG_PCIEAER_CXL

static void cxl_dport_map_rch_aer(struct cxl_dport *dport)
{
        resource_size_t aer_phys;
        struct device *host;
        u16 aer_cap;

        aer_cap = cxl_rcrb_to_aer(dport->dport_dev, dport->rcrb.base);
        if (aer_cap) {
                host = dport->reg_map.host;
                aer_phys = aer_cap + dport->rcrb.base;
                dport->regs.dport_aer = devm_cxl_iomap_block(host, aer_phys,
                                sizeof(struct aer_capability_regs));
        }
}

static void cxl_dport_map_ras(struct cxl_dport *dport)
{
        struct cxl_register_map *map = &dport->reg_map;
        struct device *dev = dport->dport_dev;

        if (!map->component_map.ras.valid)
                dev_dbg(dev, "RAS registers not found\n");
        else if (cxl_map_component_regs(map, &dport->regs.component,
                                        BIT(CXL_CM_CAP_CAP_ID_RAS)))
                dev_dbg(dev, "Failed to map RAS capability.\n");
}

static void cxl_disable_rch_root_ints(struct cxl_dport *dport)
{
        void __iomem *aer_base = dport->regs.dport_aer;
        u32 aer_cmd_mask, aer_cmd;

        if (!aer_base)
                return;

        /*
         * Disable RCH root port command interrupts.
         * CXL 3.0 12.2.1.1 - RCH Downstream Port-detected Errors
         *
         * This sequence may not be necessary. CXL spec states disabling
         * the root cmd register's interrupts is required. But, PCI spec
         * shows these are disabled by default on reset.
         */
        aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN |
                        PCI_ERR_ROOT_CMD_NONFATAL_EN |
                        PCI_ERR_ROOT_CMD_FATAL_EN);
        aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND);
        aer_cmd &= ~aer_cmd_mask;
        writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND);
}

/**
 * cxl_dport_init_ras_reporting - Setup CXL RAS report on this dport
 * @dport: the cxl_dport that needs to be initialized
 * @host: host device for devm operations
 */
void cxl_dport_init_ras_reporting(struct cxl_dport *dport, struct device *host)
{
        dport->reg_map.host = host;
        cxl_dport_map_ras(dport);

        if (dport->rch) {
                struct pci_host_bridge *host_bridge = to_pci_host_bridge(dport->dport_dev);

                if (!host_bridge->native_aer)
                        return;

                cxl_dport_map_rch_aer(dport);
                cxl_disable_rch_root_ints(dport);
        }
}
EXPORT_SYMBOL_NS_GPL(cxl_dport_init_ras_reporting, "CXL");

static void cxl_handle_rdport_cor_ras(struct cxl_dev_state *cxlds,
                                      struct cxl_dport *dport)
{
        return __cxl_handle_cor_ras(cxlds, dport->regs.ras);
}

static bool cxl_handle_rdport_ras(struct cxl_dev_state *cxlds,
                                  struct cxl_dport *dport)
{
        return __cxl_handle_ras(cxlds, dport->regs.ras);
}

/*
 * Copy the AER capability registers using 32 bit read accesses.
 * This is necessary because RCRB AER capability is MMIO mapped. Clear the
 * status after copying.
 *
 * @aer_base: base address of AER capability block in RCRB
 * @aer_regs: destination for copying AER capability
 */
static bool cxl_rch_get_aer_info(void __iomem *aer_base,
                                 struct aer_capability_regs *aer_regs)
{
        int read_cnt = sizeof(struct aer_capability_regs) / sizeof(u32);
        u32 *aer_regs_buf = (u32 *)aer_regs;
        int n;

        if (!aer_base)
                return false;

        /* Use readl() to guarantee 32-bit accesses */
        for (n = 0; n < read_cnt; n++)
                aer_regs_buf[n] = readl(aer_base + n * sizeof(u32));

        writel(aer_regs->uncor_status, aer_base + PCI_ERR_UNCOR_STATUS);
        writel(aer_regs->cor_status, aer_base + PCI_ERR_COR_STATUS);

        return true;
}

/* Get AER severity. Return false if there is no error. */
static bool cxl_rch_get_aer_severity(struct aer_capability_regs *aer_regs,
                                     int *severity)
{
        if (aer_regs->uncor_status & ~aer_regs->uncor_mask) {
                if (aer_regs->uncor_status & PCI_ERR_ROOT_FATAL_RCV)
                        *severity = AER_FATAL;
                else
                        *severity = AER_NONFATAL;
                return true;
        }

        if (aer_regs->cor_status & ~aer_regs->cor_mask) {
                *severity = AER_CORRECTABLE;
                return true;
        }

        return false;
}

static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds)
{
        struct pci_dev *pdev = to_pci_dev(cxlds->dev);
        struct aer_capability_regs aer_regs;
        struct cxl_dport *dport;
        int severity;

        struct cxl_port *port __free(put_cxl_port) =
                cxl_pci_find_port(pdev, &dport);
        if (!port)
                return;

        if (!cxl_rch_get_aer_info(dport->regs.dport_aer, &aer_regs))
                return;

        if (!cxl_rch_get_aer_severity(&aer_regs, &severity))
                return;

        pci_print_aer(pdev, severity, &aer_regs);

        if (severity == AER_CORRECTABLE)
                cxl_handle_rdport_cor_ras(cxlds, dport);
        else
                cxl_handle_rdport_ras(cxlds, dport);
}

#else
static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds) { }
#endif

void cxl_cor_error_detected(struct pci_dev *pdev)
{
        struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
        struct device *dev = &cxlds->cxlmd->dev;

        scoped_guard(device, dev) {
                if (!dev->driver) {
                        dev_warn(&pdev->dev,
                                 "%s: memdev disabled, abort error handling\n",
                                 dev_name(dev));
                        return;
                }

                if (cxlds->rcd)
                        cxl_handle_rdport_errors(cxlds);

                cxl_handle_endpoint_cor_ras(cxlds);
        }
}
EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, "CXL");

pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
                                    pci_channel_state_t state)
{
        struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
        struct cxl_memdev *cxlmd = cxlds->cxlmd;
        struct device *dev = &cxlmd->dev;
        bool ue;

        scoped_guard(device, dev) {
                if (!dev->driver) {
                        dev_warn(&pdev->dev,
                                 "%s: memdev disabled, abort error handling\n",
                                 dev_name(dev));
                        return PCI_ERS_RESULT_DISCONNECT;
                }

                if (cxlds->rcd)
                        cxl_handle_rdport_errors(cxlds);
                /*
                 * A frozen channel indicates an impending reset which is fatal to
                 * CXL.mem operation, and will likely crash the system. On the off
                 * chance the situation is recoverable dump the status of the RAS
                 * capability registers and bounce the active state of the memdev.
                 */
                ue = cxl_handle_endpoint_ras(cxlds);
        }

        switch (state) {
        case pci_channel_io_normal:
                if (ue) {
                        device_release_driver(dev);
                        return PCI_ERS_RESULT_NEED_RESET;
                }
                return PCI_ERS_RESULT_CAN_RECOVER;
        case pci_channel_io_frozen:
                dev_warn(&pdev->dev,
                         "%s: frozen state error detected, disable CXL.mem\n",
                         dev_name(dev));
                device_release_driver(dev);
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
                dev_warn(&pdev->dev,
                         "failure state error detected, request disconnect\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }
        return PCI_ERS_RESULT_NEED_RESET;
}
EXPORT_SYMBOL_NS_GPL(cxl_error_detected, "CXL");

static int cxl_flit_size(struct pci_dev *pdev)
{
        if (cxl_pci_flit_256(pdev))
                return 256;

        return 68;
}

/**
 * cxl_pci_get_latency - calculate the link latency for the PCIe link
 * @pdev: PCI device
 *
 * return: calculated latency or 0 for no latency
 *
 * CXL Memory Device SW Guide v1.0 2.11.4 Link latency calculation
 * Link latency = LinkPropagationLatency + FlitLatency + RetimerLatency
 * LinkPropagationLatency is negligible, so 0 will be used
 * RetimerLatency is assumed to be negligible and 0 will be used
 * FlitLatency = FlitSize / LinkBandwidth
 * FlitSize is defined by spec. CXL rev3.0 4.2.1.
 * 68B flit is used up to 32GT/s. >32GT/s, 256B flit size is used.
 * The FlitLatency is converted to picoseconds.
 */
long cxl_pci_get_latency(struct pci_dev *pdev)
{
        long bw;

        bw = pcie_link_speed_mbps(pdev);
        if (bw < 0)
                return 0;
        bw /= BITS_PER_BYTE;

        return cxl_flit_size(pdev) * MEGA / bw;
}
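
/*
 * Worked example of the arithmetic above (illustrative numbers only): if
 * pcie_link_speed_mbps() reports roughly 16000 Mb/s for a 16 GT/s link,
 * then bw = 16000 / 8 = 2000 MB/s, and a 68-byte flit yields
 * 68 * MEGA / 2000 = 34000 ps (34 ns) of flit latency.
 */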

static int __cxl_endpoint_decoder_reset_detected(struct device *dev, void *data)
{
        struct cxl_port *port = data;
        struct cxl_decoder *cxld;
        struct cxl_hdm *cxlhdm;
        void __iomem *hdm;
        u32 ctrl;

        if (!is_endpoint_decoder(dev))
                return 0;

        cxld = to_cxl_decoder(dev);
        if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
                return 0;

        cxlhdm = dev_get_drvdata(&port->dev);
        hdm = cxlhdm->regs.hdm_decoder;
        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));

        return !FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl);
}

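/*
 * Returns true if any enabled endpoint decoder under @port no longer has
 * its hardware "committed" bit set, i.e. the decoder programming was lost
 * to a device reset.
 */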
bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port)
{
        return device_for_each_child(&port->dev, port,
                                     __cxl_endpoint_decoder_reset_detected);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_reset_detected, "CXL");

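/*
 * Derive the link bandwidth for @pdev as the negotiated link speed
 * (converted from Mb/s to MB/s) times the negotiated link width, and
 * report the same figure as read and write bandwidth for every access
 * coordinate class.
 */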
int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c)
{
        int speed, bw;
        u16 lnksta;
        u32 width;

        speed = pcie_link_speed_mbps(pdev);
        if (speed < 0)
                return speed;
        speed /= BITS_PER_BYTE;

        pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
        width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
        bw = speed * width;

        for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
                c[i].read_bandwidth = bw;
                c[i].write_bandwidth = bw;
        }

        return 0;
}