// SPDX-License-Identifier: GPL-2.0-only
/*
 * CXL EDAC memory feature driver.
 *
 * Copyright (c) 2024-2025 HiSilicon Limited.
 *
 * - Supports functions to configure the EDAC features of
 *   CXL memory devices.
 * - Registers with the EDAC device subsystem driver to expose
 *   the feature sysfs attributes to the user for configuring
 *   CXL memory RAS features.
 */

#include <linux/cleanup.h>
#include <linux/edac.h>
#include <linux/limits.h>
#include <linux/unaligned.h>
#include <linux/xarray.h>
#include <cxl/features.h>
#include <cxl.h>
#include <cxlmem.h>
#include "core.h"
#include "trace.h"

#define CXL_NR_EDAC_DEV_FEATURES 7

#define CXL_SCRUB_NO_REGION -1

struct cxl_patrol_scrub_context {
	u8 instance;
	u16 get_feat_size;
	u16 set_feat_size;
	u8 get_version;
	u8 set_version;
	u16 effects;
	struct cxl_memdev *cxlmd;
	struct cxl_region *cxlr;
};

/*
 * See CXL spec rev 3.2 @8.2.10.9.11.1 Table 8-222 Device Patrol Scrub Control
 * Feature Readable Attributes.
 */
struct cxl_scrub_rd_attrbs {
	u8 scrub_cycle_cap;
	__le16 scrub_cycle_hours;
	u8 scrub_flags;
} __packed;

/*
 * See CXL spec rev 3.2 @8.2.10.9.11.1 Table 8-223 Device Patrol Scrub Control
 * Feature Writable Attributes.
 */
struct cxl_scrub_wr_attrbs {
	u8 scrub_cycle_hours;
	u8 scrub_flags;
} __packed;

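/*
 * In the readable attributes, scrub_cycle_hours is a 16-bit field: bits
 * [7:0] carry the current scrub cycle and bits [15:8] the minimum scrub
 * cycle supported by the device, both in hours. Bit 0 of scrub_flags
 * reports whether patrol scrub is enabled.
 */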
#define CXL_SCRUB_CONTROL_CHANGEABLE BIT(0)
#define CXL_SCRUB_CONTROL_REALTIME BIT(1)
#define CXL_SCRUB_CONTROL_CYCLE_MASK GENMASK(7, 0)
#define CXL_SCRUB_CONTROL_MIN_CYCLE_MASK GENMASK(15, 8)
#define CXL_SCRUB_CONTROL_ENABLE BIT(0)

#define CXL_GET_SCRUB_CYCLE_CHANGEABLE(cap) \
	FIELD_GET(CXL_SCRUB_CONTROL_CHANGEABLE, cap)
#define CXL_GET_SCRUB_CYCLE(cycle) \
	FIELD_GET(CXL_SCRUB_CONTROL_CYCLE_MASK, cycle)
#define CXL_GET_SCRUB_MIN_CYCLE(cycle) \
	FIELD_GET(CXL_SCRUB_CONTROL_MIN_CYCLE_MASK, cycle)
#define CXL_GET_SCRUB_EN_STS(flags) FIELD_GET(CXL_SCRUB_CONTROL_ENABLE, flags)

#define CXL_SET_SCRUB_CYCLE(cycle) \
	FIELD_PREP(CXL_SCRUB_CONTROL_CYCLE_MASK, cycle)
#define CXL_SET_SCRUB_EN(en) FIELD_PREP(CXL_SCRUB_CONTROL_ENABLE, en)

static int cxl_mem_scrub_get_attrbs(struct cxl_mailbox *cxl_mbox, u8 *cap,
				    u16 *cycle, u8 *flags, u8 *min_cycle)
{
	size_t rd_data_size = sizeof(struct cxl_scrub_rd_attrbs);
	size_t data_size;
	struct cxl_scrub_rd_attrbs *rd_attrbs __free(kfree) =
		kzalloc(rd_data_size, GFP_KERNEL);
	if (!rd_attrbs)
		return -ENOMEM;

	data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID,
				    CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
				    rd_data_size, 0, NULL);
	if (!data_size)
		return -EIO;

	*cap = rd_attrbs->scrub_cycle_cap;
	*cycle = le16_to_cpu(rd_attrbs->scrub_cycle_hours);
	*flags = rd_attrbs->scrub_flags;
	if (min_cycle)
		*min_cycle = CXL_GET_SCRUB_MIN_CYCLE(*cycle);

	return 0;
}

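/*
 * For a region-scoped context, read the attributes from every memdev
 * backing the region; the reported minimum scrub cycle is the largest
 * minimum among those memdevs.
 */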
static int cxl_scrub_get_attrbs(struct cxl_patrol_scrub_context *cxl_ps_ctx,
				u8 *cap, u16 *cycle, u8 *flags, u8 *min_cycle)
{
	struct cxl_mailbox *cxl_mbox;
	struct cxl_region_params *p;
	struct cxl_memdev *cxlmd;
	struct cxl_region *cxlr;
	u8 min_scrub_cycle = 0;
	int i, ret;

	if (!cxl_ps_ctx->cxlr) {
		cxl_mbox = &cxl_ps_ctx->cxlmd->cxlds->cxl_mbox;
		return cxl_mem_scrub_get_attrbs(cxl_mbox, cap, cycle,
						flags, min_cycle);
	}

	struct rw_semaphore *region_lock __free(rwsem_read_release) =
		rwsem_read_intr_acquire(&cxl_region_rwsem);
	if (!region_lock)
		return -EINTR;

	cxlr = cxl_ps_ctx->cxlr;
	p = &cxlr->params;

	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];

		cxlmd = cxled_to_memdev(cxled);
		cxl_mbox = &cxlmd->cxlds->cxl_mbox;
		ret = cxl_mem_scrub_get_attrbs(cxl_mbox, cap, cycle, flags,
					       min_cycle);
		if (ret)
			return ret;

		/*
		 * The min_scrub_cycle of a region is the max of minimum scrub
		 * cycles supported by memdevs that back the region.
		 */
		if (min_cycle)
			min_scrub_cycle = max(*min_cycle, min_scrub_cycle);
	}

	if (min_cycle)
		*min_cycle = min_scrub_cycle;

	return 0;
}

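/*
 * Apply the requested scrub cycle to every memdev backing the region and
 * record which region last programmed each device's scrub rate.
 */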
static int cxl_scrub_set_attrbs_region(struct device *dev,
				       struct cxl_patrol_scrub_context *cxl_ps_ctx,
				       u8 cycle, u8 flags)
{
	struct cxl_scrub_wr_attrbs wr_attrbs;
	struct cxl_mailbox *cxl_mbox;
	struct cxl_region_params *p;
	struct cxl_memdev *cxlmd;
	struct cxl_region *cxlr;
	int ret, i;

	struct rw_semaphore *region_lock __free(rwsem_read_release) =
		rwsem_read_intr_acquire(&cxl_region_rwsem);
	if (!region_lock)
		return -EINTR;

	cxlr = cxl_ps_ctx->cxlr;
	p = &cxlr->params;
	wr_attrbs.scrub_cycle_hours = cycle;
	wr_attrbs.scrub_flags = flags;

	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];

		cxlmd = cxled_to_memdev(cxled);
		cxl_mbox = &cxlmd->cxlds->cxl_mbox;
		ret = cxl_set_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID,
				      cxl_ps_ctx->set_version, &wr_attrbs,
				      sizeof(wr_attrbs),
				      CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET,
				      0, NULL);
		if (ret)
			return ret;

		if (cycle != cxlmd->scrub_cycle) {
			if (cxlmd->scrub_region_id != CXL_SCRUB_NO_REGION)
				dev_info(dev,
					 "Device scrub rate(%d hours) set by region%d rate overwritten by region%d scrub rate(%d hours)\n",
					 cxlmd->scrub_cycle,
					 cxlmd->scrub_region_id, cxlr->id,
					 cycle);

			cxlmd->scrub_cycle = cycle;
			cxlmd->scrub_region_id = cxlr->id;
		}
	}

	return 0;
}

static int cxl_scrub_set_attrbs_device(struct device *dev,
				       struct cxl_patrol_scrub_context *cxl_ps_ctx,
				       u8 cycle, u8 flags)
{
	struct cxl_scrub_wr_attrbs wr_attrbs;
	struct cxl_mailbox *cxl_mbox;
	struct cxl_memdev *cxlmd;
	int ret;

	wr_attrbs.scrub_cycle_hours = cycle;
	wr_attrbs.scrub_flags = flags;

	cxlmd = cxl_ps_ctx->cxlmd;
	cxl_mbox = &cxlmd->cxlds->cxl_mbox;
	ret = cxl_set_feature(cxl_mbox, &CXL_FEAT_PATROL_SCRUB_UUID,
			      cxl_ps_ctx->set_version, &wr_attrbs,
			      sizeof(wr_attrbs),
			      CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET, 0,
			      NULL);
	if (ret)
		return ret;

	if (cycle != cxlmd->scrub_cycle) {
		if (cxlmd->scrub_region_id != CXL_SCRUB_NO_REGION)
			dev_info(dev,
				 "Device scrub rate(%d hours) set by region%d rate overwritten with device local scrub rate(%d hours)\n",
				 cxlmd->scrub_cycle, cxlmd->scrub_region_id,
				 cycle);

		cxlmd->scrub_cycle = cycle;
		cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION;
	}

	return 0;
}

static int cxl_scrub_set_attrbs(struct device *dev,
				struct cxl_patrol_scrub_context *cxl_ps_ctx,
				u8 cycle, u8 flags)
{
	if (cxl_ps_ctx->cxlr)
		return cxl_scrub_set_attrbs_region(dev, cxl_ps_ctx, cycle, flags);

	return cxl_scrub_set_attrbs_device(dev, cxl_ps_ctx, cycle, flags);
}

static int cxl_patrol_scrub_get_enabled_bg(struct device *dev, void *drv_data,
					   bool *enabled)
{
	struct cxl_patrol_scrub_context *ctx = drv_data;
	u8 cap, flags;
	u16 cycle;
	int ret;

	ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, NULL);
	if (ret)
		return ret;

	*enabled = CXL_GET_SCRUB_EN_STS(flags);

	return 0;
}

static int cxl_patrol_scrub_set_enabled_bg(struct device *dev, void *drv_data,
					   bool enable)
{
	struct cxl_patrol_scrub_context *ctx = drv_data;
	u8 cap, flags, wr_cycle;
	u16 rd_cycle;
	int ret;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = cxl_scrub_get_attrbs(ctx, &cap, &rd_cycle, &flags, NULL);
	if (ret)
		return ret;

	wr_cycle = CXL_GET_SCRUB_CYCLE(rd_cycle);
	flags = CXL_SET_SCRUB_EN(enable);

	return cxl_scrub_set_attrbs(dev, ctx, wr_cycle, flags);
}

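/* The EDAC scrub control deals in seconds; the device works in hours. */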
static int cxl_patrol_scrub_get_min_scrub_cycle(struct device *dev,
						void *drv_data, u32 *min)
{
	struct cxl_patrol_scrub_context *ctx = drv_data;
	u8 cap, flags, min_cycle;
	u16 cycle;
	int ret;

	ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, &min_cycle);
	if (ret)
		return ret;

	*min = min_cycle * 3600;

	return 0;
}

static int cxl_patrol_scrub_get_max_scrub_cycle(struct device *dev,
						void *drv_data, u32 *max)
{
	*max = U8_MAX * 3600; /* Max set by register size */

	return 0;
}

static int cxl_patrol_scrub_get_scrub_cycle(struct device *dev, void *drv_data,
					    u32 *scrub_cycle_secs)
{
	struct cxl_patrol_scrub_context *ctx = drv_data;
	u8 cap, flags;
	u16 cycle;
	int ret;

	ret = cxl_scrub_get_attrbs(ctx, &cap, &cycle, &flags, NULL);
	if (ret)
		return ret;

	*scrub_cycle_secs = CXL_GET_SCRUB_CYCLE(cycle) * 3600;

	return 0;
}

static int cxl_patrol_scrub_set_scrub_cycle(struct device *dev, void *drv_data,
					    u32 scrub_cycle_secs)
{
	struct cxl_patrol_scrub_context *ctx = drv_data;
	u8 scrub_cycle_hours = scrub_cycle_secs / 3600;
	u8 cap, wr_cycle, flags, min_cycle;
	u16 rd_cycle;
	int ret;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = cxl_scrub_get_attrbs(ctx, &cap, &rd_cycle, &flags, &min_cycle);
	if (ret)
		return ret;

	if (!CXL_GET_SCRUB_CYCLE_CHANGEABLE(cap))
		return -EOPNOTSUPP;

	if (scrub_cycle_hours < min_cycle) {
		dev_dbg(dev, "Invalid CXL patrol scrub cycle(%d) to set\n",
			scrub_cycle_hours);
		dev_dbg(dev,
			"Minimum supported CXL patrol scrub cycle in hour %d\n",
			min_cycle);
		return -EINVAL;
	}
	wr_cycle = CXL_SET_SCRUB_CYCLE(scrub_cycle_hours);

	return cxl_scrub_set_attrbs(dev, ctx, wr_cycle, flags);
}

static const struct edac_scrub_ops cxl_ps_scrub_ops = {
	.get_enabled_bg = cxl_patrol_scrub_get_enabled_bg,
	.set_enabled_bg = cxl_patrol_scrub_set_enabled_bg,
	.get_min_cycle = cxl_patrol_scrub_get_min_scrub_cycle,
	.get_max_cycle = cxl_patrol_scrub_get_max_scrub_cycle,
	.get_cycle_duration = cxl_patrol_scrub_get_scrub_cycle,
	.set_cycle_duration = cxl_patrol_scrub_set_scrub_cycle,
};

static int cxl_memdev_scrub_init(struct cxl_memdev *cxlmd,
				 struct edac_dev_feature *ras_feature,
				 u8 scrub_inst)
{
	struct cxl_patrol_scrub_context *cxl_ps_ctx;
	struct cxl_feat_entry *feat_entry;
	u8 cap, flags;
	u16 cycle;
	int rc;

	feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
				      &CXL_FEAT_PATROL_SCRUB_UUID);
	if (IS_ERR(feat_entry))
		return -EOPNOTSUPP;

	if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
		return -EOPNOTSUPP;

	cxl_ps_ctx = devm_kzalloc(&cxlmd->dev, sizeof(*cxl_ps_ctx), GFP_KERNEL);
	if (!cxl_ps_ctx)
		return -ENOMEM;

	*cxl_ps_ctx = (struct cxl_patrol_scrub_context){
		.get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
		.set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
		.get_version = feat_entry->get_feat_ver,
		.set_version = feat_entry->set_feat_ver,
		.effects = le16_to_cpu(feat_entry->effects),
		.instance = scrub_inst,
		.cxlmd = cxlmd,
	};

	rc = cxl_mem_scrub_get_attrbs(&cxlmd->cxlds->cxl_mbox, &cap, &cycle,
				      &flags, NULL);
	if (rc)
		return rc;

	cxlmd->scrub_cycle = CXL_GET_SCRUB_CYCLE(cycle);
	cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION;

	ras_feature->ft_type = RAS_FEAT_SCRUB;
	ras_feature->instance = cxl_ps_ctx->instance;
	ras_feature->scrub_ops = &cxl_ps_scrub_ops;
	ras_feature->ctx = cxl_ps_ctx;

	return 0;
}

static int cxl_region_scrub_init(struct cxl_region *cxlr,
				 struct edac_dev_feature *ras_feature,
				 u8 scrub_inst)
{
	struct cxl_patrol_scrub_context *cxl_ps_ctx;
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_feat_entry *feat_entry = NULL;
	struct cxl_memdev *cxlmd;
	u8 cap, flags;
	u16 cycle;
	int i, rc;

	/*
	 * The cxl_region_rwsem must be held if the code below is used in a context
	 * other than when the region is in the probe state, as shown here.
	 */
	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];

		cxlmd = cxled_to_memdev(cxled);
		feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
					      &CXL_FEAT_PATROL_SCRUB_UUID);
		if (IS_ERR(feat_entry))
			return -EOPNOTSUPP;

		if (!(le32_to_cpu(feat_entry->flags) &
		      CXL_FEATURE_F_CHANGEABLE))
			return -EOPNOTSUPP;

		rc = cxl_mem_scrub_get_attrbs(&cxlmd->cxlds->cxl_mbox, &cap,
					      &cycle, &flags, NULL);
		if (rc)
			return rc;

		cxlmd->scrub_cycle = CXL_GET_SCRUB_CYCLE(cycle);
		cxlmd->scrub_region_id = CXL_SCRUB_NO_REGION;
	}

	cxl_ps_ctx = devm_kzalloc(&cxlr->dev, sizeof(*cxl_ps_ctx), GFP_KERNEL);
	if (!cxl_ps_ctx)
		return -ENOMEM;

	*cxl_ps_ctx = (struct cxl_patrol_scrub_context){
		.get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
		.set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
		.get_version = feat_entry->get_feat_ver,
		.set_version = feat_entry->set_feat_ver,
		.effects = le16_to_cpu(feat_entry->effects),
		.instance = scrub_inst,
		.cxlr = cxlr,
	};

	ras_feature->ft_type = RAS_FEAT_SCRUB;
	ras_feature->instance = cxl_ps_ctx->instance;
	ras_feature->scrub_ops = &cxl_ps_scrub_ops;
	ras_feature->ctx = cxl_ps_ctx;

	return 0;
}

struct cxl_ecs_context {
	u16 num_media_frus;
	u16 get_feat_size;
	u16 set_feat_size;
	u8 get_version;
	u8 set_version;
	u16 effects;
	struct cxl_memdev *cxlmd;
};

/*
 * See CXL spec rev 3.2 @8.2.10.9.11.2 Table 8-225 DDR5 ECS Control Feature
 * Readable Attributes.
 */
struct cxl_ecs_fru_rd_attrbs {
	u8 ecs_cap;
	__le16 ecs_config;
	u8 ecs_flags;
} __packed;

struct cxl_ecs_rd_attrbs {
	u8 ecs_log_cap;
	struct cxl_ecs_fru_rd_attrbs fru_attrbs[];
} __packed;

/*
 * See CXL spec rev 3.2 @8.2.10.9.11.2 Table 8-226 DDR5 ECS Control Feature
 * Writable Attributes.
 */
struct cxl_ecs_fru_wr_attrbs {
	__le16 ecs_config;
} __packed;

struct cxl_ecs_wr_attrbs {
	u8 ecs_log_cap;
	struct cxl_ecs_fru_wr_attrbs fru_attrbs[];
} __packed;

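/*
 * Layout used below: ecs_log_cap bits [1:0] select the log entry type;
 * the per-FRU ecs_config field holds the threshold count in bits [2:0]
 * (encoded), the count mode in bit 3 and the reset-counter control in
 * bit 4.
 */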
#define CXL_ECS_LOG_ENTRY_TYPE_MASK GENMASK(1, 0)
#define CXL_ECS_REALTIME_REPORT_CAP_MASK BIT(0)
#define CXL_ECS_THRESHOLD_COUNT_MASK GENMASK(2, 0)
#define CXL_ECS_COUNT_MODE_MASK BIT(3)
#define CXL_ECS_RESET_COUNTER_MASK BIT(4)
#define CXL_ECS_RESET_COUNTER 1

enum {
	ECS_THRESHOLD_256 = 256,
	ECS_THRESHOLD_1024 = 1024,
	ECS_THRESHOLD_4096 = 4096,
};

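/*
 * The threshold count field carries an encoded index rather than the raw
 * count: 3 -> 256, 4 -> 1024, 5 -> 4096, as mapped below.
 */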
enum {
	ECS_THRESHOLD_IDX_256 = 3,
	ECS_THRESHOLD_IDX_1024 = 4,
	ECS_THRESHOLD_IDX_4096 = 5,
};

static const u16 ecs_supp_threshold[] = {
	[ECS_THRESHOLD_IDX_256] = 256,
	[ECS_THRESHOLD_IDX_1024] = 1024,
	[ECS_THRESHOLD_IDX_4096] = 4096,
};

enum {
	ECS_LOG_ENTRY_TYPE_DRAM = 0x0,
	ECS_LOG_ENTRY_TYPE_MEM_MEDIA_FRU = 0x1,
};

enum cxl_ecs_count_mode {
	ECS_MODE_COUNTS_ROWS = 0,
	ECS_MODE_COUNTS_CODEWORDS = 1,
};

static int cxl_mem_ecs_get_attrbs(struct device *dev,
				  struct cxl_ecs_context *cxl_ecs_ctx,
				  int fru_id, u8 *log_cap, u16 *config)
{
	struct cxl_memdev *cxlmd = cxl_ecs_ctx->cxlmd;
	struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
	struct cxl_ecs_fru_rd_attrbs *fru_rd_attrbs;
	size_t rd_data_size;
	size_t data_size;

	rd_data_size = cxl_ecs_ctx->get_feat_size;

	struct cxl_ecs_rd_attrbs *rd_attrbs __free(kvfree) =
		kvzalloc(rd_data_size, GFP_KERNEL);
	if (!rd_attrbs)
		return -ENOMEM;

	data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_ECS_UUID,
				    CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
				    rd_data_size, 0, NULL);
	if (!data_size)
		return -EIO;

	fru_rd_attrbs = rd_attrbs->fru_attrbs;
	*log_cap = rd_attrbs->ecs_log_cap;
	*config = le16_to_cpu(fru_rd_attrbs[fru_id].ecs_config);

	return 0;
}

static int cxl_mem_ecs_set_attrbs(struct device *dev,
				  struct cxl_ecs_context *cxl_ecs_ctx,
				  int fru_id, u8 log_cap, u16 config)
{
	struct cxl_memdev *cxlmd = cxl_ecs_ctx->cxlmd;
	struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
	struct cxl_ecs_fru_rd_attrbs *fru_rd_attrbs;
	struct cxl_ecs_fru_wr_attrbs *fru_wr_attrbs;
	size_t rd_data_size, wr_data_size;
	u16 num_media_frus, count;
	size_t data_size;

	num_media_frus = cxl_ecs_ctx->num_media_frus;
	rd_data_size = cxl_ecs_ctx->get_feat_size;
	wr_data_size = cxl_ecs_ctx->set_feat_size;
	struct cxl_ecs_rd_attrbs *rd_attrbs __free(kvfree) =
		kvzalloc(rd_data_size, GFP_KERNEL);
	if (!rd_attrbs)
		return -ENOMEM;

	data_size = cxl_get_feature(cxl_mbox, &CXL_FEAT_ECS_UUID,
				    CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
				    rd_data_size, 0, NULL);
	if (!data_size)
		return -EIO;

	struct cxl_ecs_wr_attrbs *wr_attrbs __free(kvfree) =
		kvzalloc(wr_data_size, GFP_KERNEL);
	if (!wr_attrbs)
		return -ENOMEM;

	/*
	 * Fill writable attributes from the current attributes read
	 * for all the media FRUs.
	 */
	fru_rd_attrbs = rd_attrbs->fru_attrbs;
	fru_wr_attrbs = wr_attrbs->fru_attrbs;
	wr_attrbs->ecs_log_cap = log_cap;
	for (count = 0; count < num_media_frus; count++)
		fru_wr_attrbs[count].ecs_config =
			fru_rd_attrbs[count].ecs_config;

	fru_wr_attrbs[fru_id].ecs_config = cpu_to_le16(config);

	return cxl_set_feature(cxl_mbox, &CXL_FEAT_ECS_UUID,
			       cxl_ecs_ctx->set_version, wr_attrbs,
			       wr_data_size,
			       CXL_SET_FEAT_FLAG_DATA_SAVED_ACROSS_RESET,
			       0, NULL);
}

static u8 cxl_get_ecs_log_entry_type(u8 log_cap, u16 config)
{
	return FIELD_GET(CXL_ECS_LOG_ENTRY_TYPE_MASK, log_cap);
}

static u16 cxl_get_ecs_threshold(u8 log_cap, u16 config)
{
	u8 index = FIELD_GET(CXL_ECS_THRESHOLD_COUNT_MASK, config);

	return ecs_supp_threshold[index];
}

static u8 cxl_get_ecs_count_mode(u8 log_cap, u16 config)
{
	return FIELD_GET(CXL_ECS_COUNT_MODE_MASK, config);
}

#define CXL_ECS_GET_ATTR(attrb)						    \
	static int cxl_ecs_get_##attrb(struct device *dev, void *drv_data, \
				       int fru_id, u32 *val)		    \
	{								    \
		struct cxl_ecs_context *ctx = drv_data;			    \
		u8 log_cap;						    \
		u16 config;						    \
		int ret;						    \
									    \
		ret = cxl_mem_ecs_get_attrbs(dev, ctx, fru_id, &log_cap,    \
					     &config);			    \
		if (ret)						    \
			return ret;					    \
									    \
		*val = cxl_get_ecs_##attrb(log_cap, config);		    \
									    \
		return 0;						    \
	}

CXL_ECS_GET_ATTR(log_entry_type)
CXL_ECS_GET_ATTR(count_mode)
CXL_ECS_GET_ATTR(threshold)

static int cxl_set_ecs_log_entry_type(struct device *dev, u8 *log_cap,
				      u16 *config, u32 val)
{
	if (val != ECS_LOG_ENTRY_TYPE_DRAM &&
	    val != ECS_LOG_ENTRY_TYPE_MEM_MEDIA_FRU)
		return -EINVAL;

	*log_cap = FIELD_PREP(CXL_ECS_LOG_ENTRY_TYPE_MASK, val);

	return 0;
}

static int cxl_set_ecs_threshold(struct device *dev, u8 *log_cap, u16 *config,
				 u32 val)
{
	*config &= ~CXL_ECS_THRESHOLD_COUNT_MASK;

	switch (val) {
	case ECS_THRESHOLD_256:
		*config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK,
				      ECS_THRESHOLD_IDX_256);
		break;
	case ECS_THRESHOLD_1024:
		*config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK,
				      ECS_THRESHOLD_IDX_1024);
		break;
	case ECS_THRESHOLD_4096:
		*config |= FIELD_PREP(CXL_ECS_THRESHOLD_COUNT_MASK,
				      ECS_THRESHOLD_IDX_4096);
		break;
	default:
		dev_dbg(dev, "Invalid CXL ECS threshold count(%d) to set\n",
			val);
		dev_dbg(dev, "Supported ECS threshold counts: %u, %u, %u\n",
			ECS_THRESHOLD_256, ECS_THRESHOLD_1024,
			ECS_THRESHOLD_4096);
		return -EINVAL;
	}

	return 0;
}

static int cxl_set_ecs_count_mode(struct device *dev, u8 *log_cap, u16 *config,
				  u32 val)
{
	if (val != ECS_MODE_COUNTS_ROWS && val != ECS_MODE_COUNTS_CODEWORDS) {
		dev_dbg(dev, "Invalid CXL ECS scrub mode(%d) to set\n", val);
		dev_dbg(dev,
			"Supported ECS Modes: 0: ECS counts rows with errors,"
			" 1: ECS counts codewords with errors\n");
		return -EINVAL;
	}

	*config &= ~CXL_ECS_COUNT_MODE_MASK;
	*config |= FIELD_PREP(CXL_ECS_COUNT_MODE_MASK, val);

	return 0;
}

static int cxl_set_ecs_reset_counter(struct device *dev, u8 *log_cap,
				     u16 *config, u32 val)
{
	if (val != CXL_ECS_RESET_COUNTER)
		return -EINVAL;

	*config &= ~CXL_ECS_RESET_COUNTER_MASK;
	*config |= FIELD_PREP(CXL_ECS_RESET_COUNTER_MASK, val);

	return 0;
}

#define CXL_ECS_SET_ATTR(attrb)						    \
	static int cxl_ecs_set_##attrb(struct device *dev, void *drv_data, \
				       int fru_id, u32 val)		    \
	{								    \
		struct cxl_ecs_context *ctx = drv_data;			    \
		u8 log_cap;						    \
		u16 config;						    \
		int ret;						    \
									    \
		if (!capable(CAP_SYS_RAWIO))				    \
			return -EPERM;					    \
									    \
		ret = cxl_mem_ecs_get_attrbs(dev, ctx, fru_id, &log_cap,    \
					     &config);			    \
		if (ret)						    \
			return ret;					    \
									    \
		ret = cxl_set_ecs_##attrb(dev, &log_cap, &config, val);	    \
		if (ret)						    \
			return ret;					    \
									    \
		return cxl_mem_ecs_set_attrbs(dev, ctx, fru_id, log_cap,    \
					      config);			    \
	}
CXL_ECS_SET_ATTR(log_entry_type)
CXL_ECS_SET_ATTR(count_mode)
CXL_ECS_SET_ATTR(reset_counter)
CXL_ECS_SET_ATTR(threshold)

static const struct edac_ecs_ops cxl_ecs_ops = {
	.get_log_entry_type = cxl_ecs_get_log_entry_type,
	.set_log_entry_type = cxl_ecs_set_log_entry_type,
	.get_mode = cxl_ecs_get_count_mode,
	.set_mode = cxl_ecs_set_count_mode,
	.reset = cxl_ecs_set_reset_counter,
	.get_threshold = cxl_ecs_get_threshold,
	.set_threshold = cxl_ecs_set_threshold,
};

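/*
 * The number of media FRUs is derived from the Get Feature payload size:
 * one fixed header plus one readable attribute entry per FRU.
 */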
static int cxl_memdev_ecs_init(struct cxl_memdev *cxlmd,
			       struct edac_dev_feature *ras_feature)
{
	struct cxl_ecs_context *cxl_ecs_ctx;
	struct cxl_feat_entry *feat_entry;
	int num_media_frus;

	feat_entry =
		cxl_feature_info(to_cxlfs(cxlmd->cxlds), &CXL_FEAT_ECS_UUID);
	if (IS_ERR(feat_entry))
		return -EOPNOTSUPP;

	if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
		return -EOPNOTSUPP;

	num_media_frus = (le16_to_cpu(feat_entry->get_feat_size) -
			  sizeof(struct cxl_ecs_rd_attrbs)) /
			 sizeof(struct cxl_ecs_fru_rd_attrbs);
	if (!num_media_frus)
		return -EOPNOTSUPP;

	cxl_ecs_ctx =
		devm_kzalloc(&cxlmd->dev, sizeof(*cxl_ecs_ctx), GFP_KERNEL);
	if (!cxl_ecs_ctx)
		return -ENOMEM;

	*cxl_ecs_ctx = (struct cxl_ecs_context){
		.get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
		.set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
		.get_version = feat_entry->get_feat_ver,
		.set_version = feat_entry->set_feat_ver,
		.effects = le16_to_cpu(feat_entry->effects),
		.num_media_frus = num_media_frus,
		.cxlmd = cxlmd,
	};

	ras_feature->ft_type = RAS_FEAT_ECS;
	ras_feature->ecs_ops = &cxl_ecs_ops;
	ras_feature->ctx = cxl_ecs_ctx;
	ras_feature->ecs_info.num_media_frus = num_media_frus;

	return 0;
}

/*
 * Perform Maintenance CXL 3.2 Spec 8.2.10.7.1
 */

/*
 * Perform Maintenance input payload
 * CXL rev 3.2 section 8.2.10.7.1 Table 8-117
 */
struct cxl_mbox_maintenance_hdr {
	u8 op_class;
	u8 op_subclass;
} __packed;

static int cxl_perform_maintenance(struct cxl_mailbox *cxl_mbox, u8 class,
				   u8 subclass, void *data_in,
				   size_t data_in_size)
{
	struct cxl_memdev_maintenance_pi {
		struct cxl_mbox_maintenance_hdr hdr;
		u8 data[];
	} __packed;
	struct cxl_mbox_cmd mbox_cmd;
	size_t hdr_size;

	struct cxl_memdev_maintenance_pi *pi __free(kvfree) =
		kvzalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!pi)
		return -ENOMEM;

	pi->hdr.op_class = class;
	pi->hdr.op_subclass = subclass;
	hdr_size = sizeof(pi->hdr);
	/*
	 * Check minimum mbox payload size is available for
	 * the maintenance data transfer.
	 */
	if (hdr_size + data_in_size > cxl_mbox->payload_size)
		return -ENOMEM;

	memcpy(pi->data, data_in, data_in_size);
	mbox_cmd = (struct cxl_mbox_cmd){
		.opcode = CXL_MBOX_OP_DO_MAINTENANCE,
		.size_in = hdr_size + data_in_size,
		.payload_in = pi,
	};

	return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}

/*
 * Support for determining whether memory operation attributes are from
 * the current boot or not.
 */

struct cxl_mem_err_rec {
	struct xarray rec_gen_media;
	struct xarray rec_dram;
};

enum cxl_mem_repair_type {
	CXL_PPR,
	CXL_CACHELINE_SPARING,
	CXL_ROW_SPARING,
	CXL_BANK_SPARING,
	CXL_RANK_SPARING,
	CXL_REPAIR_MAX,
};

/**
 * struct cxl_mem_repair_attrbs - CXL memory repair attributes
 * @dpa: DPA of memory to repair
 * @nibble_mask: nibble mask, identifies one or more nibbles on the memory bus
 * @row: row of memory to repair
 * @column: column of memory to repair
 * @channel: channel of memory to repair
 * @sub_channel: sub channel of memory to repair
 * @rank: rank of memory to repair
 * @bank_group: bank group of memory to repair
 * @bank: bank of memory to repair
 * @repair_type: repair type, e.g. PPR, memory sparing etc.
 */
struct cxl_mem_repair_attrbs {
	u64 dpa;
	u32 nibble_mask;
	u32 row;
	u16 column;
	u8 channel;
	u8 sub_channel;
	u8 rank;
	u8 bank_group;
	u8 bank;
	enum cxl_mem_repair_type repair_type;
};

static struct cxl_event_gen_media *
cxl_find_rec_gen_media(struct cxl_memdev *cxlmd,
		       struct cxl_mem_repair_attrbs *attrbs)
{
	struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
	struct cxl_event_gen_media *rec;

	if (!array_rec)
		return NULL;

	rec = xa_load(&array_rec->rec_gen_media, attrbs->dpa);
	if (!rec)
		return NULL;

	if (attrbs->repair_type == CXL_PPR)
		return rec;

	return NULL;
}

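/*
 * Match a cached DRAM event record against the requested repair
 * attributes. Channel and rank must be valid for any match; additional
 * fields are checked depending on the repair granularity.
 */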
static struct cxl_event_dram *
cxl_find_rec_dram(struct cxl_memdev *cxlmd,
		  struct cxl_mem_repair_attrbs *attrbs)
{
	struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
	struct cxl_event_dram *rec;
	u16 validity_flags;

	if (!array_rec)
		return NULL;

	rec = xa_load(&array_rec->rec_dram, attrbs->dpa);
	if (!rec)
		return NULL;

	validity_flags = get_unaligned_le16(rec->media_hdr.validity_flags);
	if (!(validity_flags & CXL_DER_VALID_CHANNEL) ||
	    !(validity_flags & CXL_DER_VALID_RANK))
		return NULL;

	switch (attrbs->repair_type) {
	case CXL_PPR:
		if (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
		    get_unaligned_le24(rec->nibble_mask) == attrbs->nibble_mask)
			return rec;
		break;
	case CXL_CACHELINE_SPARING:
		if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) ||
		    !(validity_flags & CXL_DER_VALID_BANK) ||
		    !(validity_flags & CXL_DER_VALID_ROW) ||
		    !(validity_flags & CXL_DER_VALID_COLUMN))
			return NULL;

		if (rec->media_hdr.channel == attrbs->channel &&
		    rec->media_hdr.rank == attrbs->rank &&
		    rec->bank_group == attrbs->bank_group &&
		    rec->bank == attrbs->bank &&
		    get_unaligned_le24(rec->row) == attrbs->row &&
		    get_unaligned_le16(rec->column) == attrbs->column &&
		    (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
		     get_unaligned_le24(rec->nibble_mask) ==
			     attrbs->nibble_mask) &&
		    (!(validity_flags & CXL_DER_VALID_SUB_CHANNEL) ||
		     rec->sub_channel == attrbs->sub_channel))
			return rec;
		break;
	case CXL_ROW_SPARING:
		if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) ||
		    !(validity_flags & CXL_DER_VALID_BANK) ||
		    !(validity_flags & CXL_DER_VALID_ROW))
			return NULL;

		if (rec->media_hdr.channel == attrbs->channel &&
		    rec->media_hdr.rank == attrbs->rank &&
		    rec->bank_group == attrbs->bank_group &&
		    rec->bank == attrbs->bank &&
		    get_unaligned_le24(rec->row) == attrbs->row &&
		    (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
		     get_unaligned_le24(rec->nibble_mask) ==
			     attrbs->nibble_mask))
			return rec;
		break;
	case CXL_BANK_SPARING:
		if (!(validity_flags & CXL_DER_VALID_BANK_GROUP) ||
		    !(validity_flags & CXL_DER_VALID_BANK))
			return NULL;

		if (rec->media_hdr.channel == attrbs->channel &&
		    rec->media_hdr.rank == attrbs->rank &&
		    rec->bank_group == attrbs->bank_group &&
		    rec->bank == attrbs->bank &&
		    (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
		     get_unaligned_le24(rec->nibble_mask) ==
			     attrbs->nibble_mask))
			return rec;
		break;
	case CXL_RANK_SPARING:
		if (rec->media_hdr.channel == attrbs->channel &&
		    rec->media_hdr.rank == attrbs->rank &&
		    (!(validity_flags & CXL_DER_VALID_NIBBLE) ||
		     get_unaligned_le24(rec->nibble_mask) ==
			     attrbs->nibble_mask))
			return rec;
		break;
	default:
		return NULL;
	}

	return NULL;
}

#define CXL_MAX_STORAGE_DAYS 10
#define CXL_MAX_STORAGE_TIME_SECS (CXL_MAX_STORAGE_DAYS * 24 * 60 * 60)

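/* Event timestamps are in nanoseconds; drop records older than the limit. */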
static void cxl_del_expired_gmedia_recs(struct xarray *rec_xarray,
					struct cxl_event_gen_media *cur_rec)
{
	u64 cur_ts = le64_to_cpu(cur_rec->media_hdr.hdr.timestamp);
	struct cxl_event_gen_media *rec;
	unsigned long index;
	u64 delta_ts_secs;

	xa_for_each(rec_xarray, index, rec) {
		delta_ts_secs = (cur_ts -
			le64_to_cpu(rec->media_hdr.hdr.timestamp)) / 1000000000ULL;
		if (delta_ts_secs >= CXL_MAX_STORAGE_TIME_SECS) {
			xa_erase(rec_xarray, index);
			kfree(rec);
		}
	}
}

static void cxl_del_expired_dram_recs(struct xarray *rec_xarray,
				      struct cxl_event_dram *cur_rec)
{
	u64 cur_ts = le64_to_cpu(cur_rec->media_hdr.hdr.timestamp);
	struct cxl_event_dram *rec;
	unsigned long index;
	u64 delta_secs;

	xa_for_each(rec_xarray, index, rec) {
		delta_secs = (cur_ts -
			le64_to_cpu(rec->media_hdr.hdr.timestamp)) / 1000000000ULL;
		if (delta_secs >= CXL_MAX_STORAGE_TIME_SECS) {
			xa_erase(rec_xarray, index);
			kfree(rec);
		}
	}
}

#define CXL_MAX_REC_STORAGE_COUNT 200

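/*
 * Cap the cache at CXL_MAX_REC_STORAGE_COUNT entries, evicting the
 * excess in xarray index (DPA) order.
 */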
static void cxl_del_overflow_old_recs(struct xarray *rec_xarray)
{
	void *err_rec;
	unsigned long index, count = 0;

	xa_for_each(rec_xarray, index, err_rec)
		count++;

	if (count <= CXL_MAX_REC_STORAGE_COUNT)
		return;

	count -= CXL_MAX_REC_STORAGE_COUNT;
	xa_for_each(rec_xarray, index, err_rec) {
		xa_erase(rec_xarray, index);
		kfree(err_rec);
		count--;
		if (!count)
			break;
	}
}

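/*
 * Cache a General Media event record keyed by DPA so that a later repair
 * request can be validated against an error seen in the current boot.
 */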
int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd, union cxl_event *evt)
{
	struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
	struct cxl_event_gen_media *rec;
	void *old_rec;

	if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec)
		return 0;

	rec = kmemdup(&evt->gen_media, sizeof(*rec), GFP_KERNEL);
	if (!rec)
		return -ENOMEM;

	old_rec = xa_store(&array_rec->rec_gen_media,
			   le64_to_cpu(rec->media_hdr.phys_addr), rec,
			   GFP_KERNEL);
	if (xa_is_err(old_rec)) {
		kfree(rec);
		return xa_err(old_rec);
	}

	kfree(old_rec);

	cxl_del_expired_gmedia_recs(&array_rec->rec_gen_media, rec);
	cxl_del_overflow_old_recs(&array_rec->rec_gen_media);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_store_rec_gen_media, "CXL");

cxl_store_rec_dram(struct cxl_memdev * cxlmd,union cxl_event * evt)11200b5ccb0dSShiju Jose int cxl_store_rec_dram(struct cxl_memdev *cxlmd, union cxl_event *evt)
11210b5ccb0dSShiju Jose {
11220b5ccb0dSShiju Jose struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
11230b5ccb0dSShiju Jose struct cxl_event_dram *rec;
11240b5ccb0dSShiju Jose void *old_rec;
11250b5ccb0dSShiju Jose
11260b5ccb0dSShiju Jose if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec)
11270b5ccb0dSShiju Jose return 0;
11280b5ccb0dSShiju Jose
11290b5ccb0dSShiju Jose rec = kmemdup(&evt->dram, sizeof(*rec), GFP_KERNEL);
11300b5ccb0dSShiju Jose if (!rec)
11310b5ccb0dSShiju Jose return -ENOMEM;
11320b5ccb0dSShiju Jose
11330b5ccb0dSShiju Jose old_rec = xa_store(&array_rec->rec_dram,
11340b5ccb0dSShiju Jose le64_to_cpu(rec->media_hdr.phys_addr), rec,
11350b5ccb0dSShiju Jose GFP_KERNEL);
1136a403fe6cSLi Ming if (xa_is_err(old_rec)) {
1137a403fe6cSLi Ming kfree(rec);
11380b5ccb0dSShiju Jose return xa_err(old_rec);
1139a403fe6cSLi Ming }
11400b5ccb0dSShiju Jose
11410b5ccb0dSShiju Jose kfree(old_rec);
11420b5ccb0dSShiju Jose
11430b5ccb0dSShiju Jose cxl_del_expired_dram_recs(&array_rec->rec_dram, rec);
11440b5ccb0dSShiju Jose cxl_del_overflow_old_recs(&array_rec->rec_dram);
11450b5ccb0dSShiju Jose
11460b5ccb0dSShiju Jose return 0;
11470b5ccb0dSShiju Jose }
11480b5ccb0dSShiju Jose EXPORT_SYMBOL_NS_GPL(cxl_store_rec_dram, "CXL");
11490b5ccb0dSShiju Jose
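/*
 * Memory is treated as online when the endpoint port has at least one
 * committed decoder, i.e. some of the device's capacity is currently
 * mapped.
 */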
1150588ca944SShiju Jose static bool cxl_is_memdev_memory_online(const struct cxl_memdev *cxlmd)
1151588ca944SShiju Jose {
1152588ca944SShiju Jose struct cxl_port *port = cxlmd->endpoint;
1153588ca944SShiju Jose
1154588ca944SShiju Jose if (port && cxl_num_decoders_committed(port))
1155588ca944SShiju Jose return true;
1156588ca944SShiju Jose
1157588ca944SShiju Jose return false;
1158588ca944SShiju Jose }
1159588ca944SShiju Jose
1160588ca944SShiju Jose /*
1161588ca944SShiju Jose * CXL memory sparing control
1162588ca944SShiju Jose */
1163588ca944SShiju Jose enum cxl_mem_sparing_granularity {
1164588ca944SShiju Jose CXL_MEM_SPARING_CACHELINE,
1165588ca944SShiju Jose CXL_MEM_SPARING_ROW,
1166588ca944SShiju Jose CXL_MEM_SPARING_BANK,
1167588ca944SShiju Jose CXL_MEM_SPARING_RANK,
1168588ca944SShiju Jose CXL_MEM_SPARING_MAX
1169588ca944SShiju Jose };
1170588ca944SShiju Jose
1171588ca944SShiju Jose struct cxl_mem_sparing_context {
1172588ca944SShiju Jose struct cxl_memdev *cxlmd;
1173588ca944SShiju Jose uuid_t repair_uuid;
1174588ca944SShiju Jose u16 get_feat_size;
1175588ca944SShiju Jose u16 set_feat_size;
1176588ca944SShiju Jose u16 effects;
1177588ca944SShiju Jose u8 instance;
1178588ca944SShiju Jose u8 get_version;
1179588ca944SShiju Jose u8 set_version;
1180588ca944SShiju Jose u8 op_class;
1181588ca944SShiju Jose u8 op_subclass;
1182588ca944SShiju Jose bool cap_safe_when_in_use;
1183588ca944SShiju Jose bool cap_hard_sparing;
1184588ca944SShiju Jose bool cap_soft_sparing;
1185588ca944SShiju Jose u8 channel;
1186588ca944SShiju Jose u8 rank;
1187588ca944SShiju Jose u8 bank_group;
1188588ca944SShiju Jose u32 nibble_mask;
1189588ca944SShiju Jose u64 dpa;
1190588ca944SShiju Jose u32 row;
1191588ca944SShiju Jose u16 column;
1192588ca944SShiju Jose u8 bank;
1193588ca944SShiju Jose u8 sub_channel;
1194588ca944SShiju Jose enum edac_mem_repair_type repair_type;
1195588ca944SShiju Jose bool persist_mode;
1196588ca944SShiju Jose };
1197588ca944SShiju Jose
1198588ca944SShiju Jose #define CXL_SPARING_RD_CAP_SAFE_IN_USE_MASK BIT(0)
1199588ca944SShiju Jose #define CXL_SPARING_RD_CAP_HARD_SPARING_MASK BIT(1)
1200588ca944SShiju Jose #define CXL_SPARING_RD_CAP_SOFT_SPARING_MASK BIT(2)
1201588ca944SShiju Jose
1202588ca944SShiju Jose #define CXL_SPARING_WR_DEVICE_INITIATED_MASK BIT(0)
1203588ca944SShiju Jose
1204588ca944SShiju Jose #define CXL_SPARING_QUERY_RESOURCE_FLAG BIT(0)
1205588ca944SShiju Jose #define CXL_SET_HARD_SPARING_FLAG BIT(1)
1206588ca944SShiju Jose #define CXL_SPARING_SUB_CHNL_VALID_FLAG BIT(2)
1207588ca944SShiju Jose #define CXL_SPARING_NIB_MASK_VALID_FLAG BIT(3)
1208588ca944SShiju Jose
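/*
 * Note: restriction flag bit 0 is set when sparing is *not* safe while the
 * memory is in use, so it is inverted below to yield a positive
 * cap_safe_when_in_use capability.
 */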
1209588ca944SShiju Jose #define CXL_GET_SPARING_SAFE_IN_USE(flags) \
1210588ca944SShiju Jose (FIELD_GET(CXL_SPARING_RD_CAP_SAFE_IN_USE_MASK, \
1211588ca944SShiju Jose flags) ^ 1)
1212588ca944SShiju Jose #define CXL_GET_CAP_HARD_SPARING(flags) \
1213588ca944SShiju Jose FIELD_GET(CXL_SPARING_RD_CAP_HARD_SPARING_MASK, \
1214588ca944SShiju Jose flags)
1215588ca944SShiju Jose #define CXL_GET_CAP_SOFT_SPARING(flags) \
1216588ca944SShiju Jose FIELD_GET(CXL_SPARING_RD_CAP_SOFT_SPARING_MASK, \
1217588ca944SShiju Jose flags)
1218588ca944SShiju Jose
1219588ca944SShiju Jose #define CXL_SET_SPARING_QUERY_RESOURCE(val) \
1220588ca944SShiju Jose FIELD_PREP(CXL_SPARING_QUERY_RESOURCE_FLAG, val)
1221588ca944SShiju Jose #define CXL_SET_HARD_SPARING(val) \
1222588ca944SShiju Jose FIELD_PREP(CXL_SET_HARD_SPARING_FLAG, val)
1223588ca944SShiju Jose #define CXL_SET_SPARING_SUB_CHNL_VALID(val) \
1224588ca944SShiju Jose FIELD_PREP(CXL_SPARING_SUB_CHNL_VALID_FLAG, val)
1225588ca944SShiju Jose #define CXL_SET_SPARING_NIB_MASK_VALID(val) \
1226588ca944SShiju Jose FIELD_PREP(CXL_SPARING_NIB_MASK_VALID_FLAG, val)
1227588ca944SShiju Jose
1228588ca944SShiju Jose /*
1229588ca944SShiju Jose * See CXL spec rev 3.2 @8.2.10.7.2.3 Table 8-134 Memory Sparing Feature
1230588ca944SShiju Jose * Readable Attributes.
1231588ca944SShiju Jose */
1232588ca944SShiju Jose struct cxl_memdev_repair_rd_attrbs_hdr {
1233588ca944SShiju Jose u8 max_op_latency;
1234588ca944SShiju Jose __le16 op_cap;
1235588ca944SShiju Jose __le16 op_mode;
1236588ca944SShiju Jose u8 op_class;
1237588ca944SShiju Jose u8 op_subclass;
1238588ca944SShiju Jose u8 rsvd[9];
1239588ca944SShiju Jose } __packed;
1240588ca944SShiju Jose
1241588ca944SShiju Jose struct cxl_memdev_sparing_rd_attrbs {
1242588ca944SShiju Jose struct cxl_memdev_repair_rd_attrbs_hdr hdr;
1243588ca944SShiju Jose u8 rsvd;
1244588ca944SShiju Jose __le16 restriction_flags;
1245588ca944SShiju Jose } __packed;
1246588ca944SShiju Jose
1247588ca944SShiju Jose /*
1248588ca944SShiju Jose * See CXL spec rev 3.2 @8.2.10.7.1.4 Table 8-120 Memory Sparing Input Payload.
1249588ca944SShiju Jose */
1250588ca944SShiju Jose struct cxl_memdev_sparing_in_payload {
1251588ca944SShiju Jose u8 flags;
1252588ca944SShiju Jose u8 channel;
1253588ca944SShiju Jose u8 rank;
1254588ca944SShiju Jose u8 nibble_mask[3];
1255588ca944SShiju Jose u8 bank_group;
1256588ca944SShiju Jose u8 bank;
1257588ca944SShiju Jose u8 row[3];
1258588ca944SShiju Jose __le16 column;
1259588ca944SShiju Jose u8 sub_channel;
1260588ca944SShiju Jose } __packed;
1261588ca944SShiju Jose
1262588ca944SShiju Jose static int
1263588ca944SShiju Jose cxl_mem_sparing_get_attrbs(struct cxl_mem_sparing_context *cxl_sparing_ctx)
1264588ca944SShiju Jose {
1265588ca944SShiju Jose size_t rd_data_size = sizeof(struct cxl_memdev_sparing_rd_attrbs);
1266588ca944SShiju Jose struct cxl_memdev *cxlmd = cxl_sparing_ctx->cxlmd;
1267588ca944SShiju Jose struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
1268588ca944SShiju Jose u16 restriction_flags;
1269588ca944SShiju Jose size_t data_size;
1270588ca944SShiju Jose u16 return_code;
1271588ca944SShiju Jose struct cxl_memdev_sparing_rd_attrbs *rd_attrbs __free(kfree) =
1272588ca944SShiju Jose kzalloc(rd_data_size, GFP_KERNEL);
1273588ca944SShiju Jose if (!rd_attrbs)
1274588ca944SShiju Jose return -ENOMEM;
1275588ca944SShiju Jose
1276588ca944SShiju Jose data_size = cxl_get_feature(cxl_mbox, &cxl_sparing_ctx->repair_uuid,
1277588ca944SShiju Jose CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
1278588ca944SShiju Jose rd_data_size, 0, &return_code);
1279588ca944SShiju Jose if (!data_size)
1280588ca944SShiju Jose return -EIO;
1281588ca944SShiju Jose
1282588ca944SShiju Jose cxl_sparing_ctx->op_class = rd_attrbs->hdr.op_class;
1283588ca944SShiju Jose cxl_sparing_ctx->op_subclass = rd_attrbs->hdr.op_subclass;
1284588ca944SShiju Jose restriction_flags = le16_to_cpu(rd_attrbs->restriction_flags);
1285588ca944SShiju Jose cxl_sparing_ctx->cap_safe_when_in_use =
1286588ca944SShiju Jose CXL_GET_SPARING_SAFE_IN_USE(restriction_flags);
1287588ca944SShiju Jose cxl_sparing_ctx->cap_hard_sparing =
1288588ca944SShiju Jose CXL_GET_CAP_HARD_SPARING(restriction_flags);
1289588ca944SShiju Jose cxl_sparing_ctx->cap_soft_sparing =
1290588ca944SShiju Jose CXL_GET_CAP_SOFT_SPARING(restriction_flags);
1291588ca944SShiju Jose
1292588ca944SShiju Jose return 0;
1293588ca944SShiju Jose }
1294588ca944SShiju Jose
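/*
 * Translate the EDAC repair type and the user-supplied geometry into a CXL
 * DRAM event record query; only the fields meaningful for the selected
 * sparing granularity are passed along.
 */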
1295588ca944SShiju Jose static struct cxl_event_dram *
1296588ca944SShiju Jose cxl_mem_get_rec_dram(struct cxl_memdev *cxlmd,
1297588ca944SShiju Jose struct cxl_mem_sparing_context *ctx)
1298588ca944SShiju Jose {
1299588ca944SShiju Jose struct cxl_mem_repair_attrbs attrbs = { 0 };
1300588ca944SShiju Jose
1301588ca944SShiju Jose attrbs.dpa = ctx->dpa;
1302588ca944SShiju Jose attrbs.channel = ctx->channel;
1303588ca944SShiju Jose attrbs.rank = ctx->rank;
1304588ca944SShiju Jose attrbs.nibble_mask = ctx->nibble_mask;
1305588ca944SShiju Jose switch (ctx->repair_type) {
1306588ca944SShiju Jose case EDAC_REPAIR_CACHELINE_SPARING:
1307588ca944SShiju Jose attrbs.repair_type = CXL_CACHELINE_SPARING;
1308588ca944SShiju Jose attrbs.bank_group = ctx->bank_group;
1309588ca944SShiju Jose attrbs.bank = ctx->bank;
1310588ca944SShiju Jose attrbs.row = ctx->row;
1311588ca944SShiju Jose attrbs.column = ctx->column;
1312588ca944SShiju Jose attrbs.sub_channel = ctx->sub_channel;
1313588ca944SShiju Jose break;
1314588ca944SShiju Jose case EDAC_REPAIR_ROW_SPARING:
1315588ca944SShiju Jose attrbs.repair_type = CXL_ROW_SPARING;
1316588ca944SShiju Jose attrbs.bank_group = ctx->bank_group;
1317588ca944SShiju Jose attrbs.bank = ctx->bank;
1318588ca944SShiju Jose attrbs.row = ctx->row;
1319588ca944SShiju Jose break;
1320588ca944SShiju Jose case EDAC_REPAIR_BANK_SPARING:
1321588ca944SShiju Jose attrbs.repair_type = CXL_BANK_SPARING;
1322588ca944SShiju Jose attrbs.bank_group = ctx->bank_group;
1323588ca944SShiju Jose attrbs.bank = ctx->bank;
1324588ca944SShiju Jose break;
1325588ca944SShiju Jose case EDAC_REPAIR_RANK_SPARING:
1326*0a46f60aSLi Ming attrbs.repair_type = CXL_RANK_SPARING;
1327588ca944SShiju Jose break;
1328588ca944SShiju Jose default:
1329588ca944SShiju Jose return NULL;
1330588ca944SShiju Jose }
1331588ca944SShiju Jose
1332588ca944SShiju Jose return cxl_find_rec_dram(cxlmd, &attrbs);
1333588ca944SShiju Jose }
1334588ca944SShiju Jose
1335588ca944SShiju Jose static int
1336588ca944SShiju Jose cxl_mem_perform_sparing(struct device *dev,
1337588ca944SShiju Jose struct cxl_mem_sparing_context *cxl_sparing_ctx)
1338588ca944SShiju Jose {
1339588ca944SShiju Jose struct cxl_memdev *cxlmd = cxl_sparing_ctx->cxlmd;
1340588ca944SShiju Jose struct cxl_memdev_sparing_in_payload sparing_pi;
1341588ca944SShiju Jose struct cxl_event_dram *rec = NULL;
1342588ca944SShiju Jose u16 validity_flags = 0;
1343588ca944SShiju Jose
1344588ca944SShiju Jose struct rw_semaphore *region_lock __free(rwsem_read_release) =
1345588ca944SShiju Jose rwsem_read_intr_acquire(&cxl_region_rwsem);
1346588ca944SShiju Jose if (!region_lock)
1347588ca944SShiju Jose return -EINTR;
1348588ca944SShiju Jose
1349588ca944SShiju Jose struct rw_semaphore *dpa_lock __free(rwsem_read_release) =
1350588ca944SShiju Jose rwsem_read_intr_acquire(&cxl_dpa_rwsem);
1351588ca944SShiju Jose if (!dpa_lock)
1352588ca944SShiju Jose return -EINTR;
1353588ca944SShiju Jose
1354588ca944SShiju Jose if (!cxl_sparing_ctx->cap_safe_when_in_use) {
1355588ca944SShiju Jose /* Memory to repair must be offline */
1356588ca944SShiju Jose if (cxl_is_memdev_memory_online(cxlmd))
1357588ca944SShiju Jose return -EBUSY;
1358588ca944SShiju Jose } else {
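/*
 * Repair of online memory is only honoured when the target matches a
 * DRAM event record from the current boot.
 */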
1359588ca944SShiju Jose if (cxl_is_memdev_memory_online(cxlmd)) {
1360588ca944SShiju Jose rec = cxl_mem_get_rec_dram(cxlmd, cxl_sparing_ctx);
1361588ca944SShiju Jose if (!rec)
1362588ca944SShiju Jose return -EINVAL;
1363588ca944SShiju Jose
1364588ca944SShiju Jose if (!get_unaligned_le16(rec->media_hdr.validity_flags))
1365588ca944SShiju Jose return -EINVAL;
1366588ca944SShiju Jose }
1367588ca944SShiju Jose }
1368588ca944SShiju Jose
1369588ca944SShiju Jose memset(&sparing_pi, 0, sizeof(sparing_pi));
1370588ca944SShiju Jose sparing_pi.flags = CXL_SET_SPARING_QUERY_RESOURCE(0);
1371588ca944SShiju Jose if (cxl_sparing_ctx->persist_mode)
1372588ca944SShiju Jose sparing_pi.flags |= CXL_SET_HARD_SPARING(1);
1373588ca944SShiju Jose
1374588ca944SShiju Jose if (rec)
1375588ca944SShiju Jose validity_flags = get_unaligned_le16(rec->media_hdr.validity_flags);
1376588ca944SShiju Jose
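/*
 * Build the payload from the most specific geometry outward: each case
 * fills in its own fields and falls through to the coarser levels, so a
 * cacheline repair also carries row, bank, rank and channel information.
 */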
1377588ca944SShiju Jose switch (cxl_sparing_ctx->repair_type) {
1378588ca944SShiju Jose case EDAC_REPAIR_CACHELINE_SPARING:
1379588ca944SShiju Jose sparing_pi.column = cpu_to_le16(cxl_sparing_ctx->column);
1380588ca944SShiju Jose if (!rec || (validity_flags & CXL_DER_VALID_SUB_CHANNEL)) {
1381588ca944SShiju Jose sparing_pi.flags |= CXL_SET_SPARING_SUB_CHNL_VALID(1);
1382588ca944SShiju Jose sparing_pi.sub_channel = cxl_sparing_ctx->sub_channel;
1383588ca944SShiju Jose }
1384588ca944SShiju Jose fallthrough;
1385588ca944SShiju Jose case EDAC_REPAIR_ROW_SPARING:
1386588ca944SShiju Jose put_unaligned_le24(cxl_sparing_ctx->row, sparing_pi.row);
1387588ca944SShiju Jose fallthrough;
1388588ca944SShiju Jose case EDAC_REPAIR_BANK_SPARING:
1389588ca944SShiju Jose sparing_pi.bank_group = cxl_sparing_ctx->bank_group;
1390588ca944SShiju Jose sparing_pi.bank = cxl_sparing_ctx->bank;
1391588ca944SShiju Jose fallthrough;
1392588ca944SShiju Jose case EDAC_REPAIR_RANK_SPARING:
1393588ca944SShiju Jose sparing_pi.rank = cxl_sparing_ctx->rank;
1394588ca944SShiju Jose fallthrough;
1395588ca944SShiju Jose default:
1396588ca944SShiju Jose sparing_pi.channel = cxl_sparing_ctx->channel;
1397588ca944SShiju Jose if ((rec && (validity_flags & CXL_DER_VALID_NIBBLE)) ||
1398588ca944SShiju Jose (!rec && (!cxl_sparing_ctx->nibble_mask ||
1399588ca944SShiju Jose (cxl_sparing_ctx->nibble_mask & 0xFFFFFF)))) {
1400588ca944SShiju Jose sparing_pi.flags |= CXL_SET_SPARING_NIB_MASK_VALID(1);
1401588ca944SShiju Jose put_unaligned_le24(cxl_sparing_ctx->nibble_mask,
1402588ca944SShiju Jose sparing_pi.nibble_mask);
1403588ca944SShiju Jose }
1404588ca944SShiju Jose break;
1405588ca944SShiju Jose }
1406588ca944SShiju Jose
1407588ca944SShiju Jose return cxl_perform_maintenance(&cxlmd->cxlds->cxl_mbox,
1408588ca944SShiju Jose cxl_sparing_ctx->op_class,
1409588ca944SShiju Jose cxl_sparing_ctx->op_subclass,
1410588ca944SShiju Jose &sparing_pi, sizeof(sparing_pi));
1411588ca944SShiju Jose }
1412588ca944SShiju Jose
1413588ca944SShiju Jose static int cxl_mem_sparing_get_repair_type(struct device *dev, void *drv_data,
1414588ca944SShiju Jose const char **repair_type)
1415588ca944SShiju Jose {
1416588ca944SShiju Jose struct cxl_mem_sparing_context *ctx = drv_data;
1417588ca944SShiju Jose
1418588ca944SShiju Jose switch (ctx->repair_type) {
1419588ca944SShiju Jose case EDAC_REPAIR_CACHELINE_SPARING:
1420588ca944SShiju Jose case EDAC_REPAIR_ROW_SPARING:
1421588ca944SShiju Jose case EDAC_REPAIR_BANK_SPARING:
1422588ca944SShiju Jose case EDAC_REPAIR_RANK_SPARING:
1423588ca944SShiju Jose *repair_type = edac_repair_type[ctx->repair_type];
1424588ca944SShiju Jose break;
1425588ca944SShiju Jose default:
1426588ca944SShiju Jose return -EINVAL;
1427588ca944SShiju Jose }
1428588ca944SShiju Jose
1429588ca944SShiju Jose return 0;
1430588ca944SShiju Jose }
1431588ca944SShiju Jose
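/*
 * Boilerplate getters and setters for the sparing context fields; these
 * back the edac_mem_repair_ops tables assembled below.
 */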
1432588ca944SShiju Jose #define CXL_SPARING_GET_ATTR(attrb, data_type) \
1433588ca944SShiju Jose static int cxl_mem_sparing_get_##attrb( \
1434588ca944SShiju Jose struct device *dev, void *drv_data, data_type *val) \
1435588ca944SShiju Jose { \
1436588ca944SShiju Jose struct cxl_mem_sparing_context *ctx = drv_data; \
1437588ca944SShiju Jose \
1438588ca944SShiju Jose *val = ctx->attrb; \
1439588ca944SShiju Jose \
1440588ca944SShiju Jose return 0; \
1441588ca944SShiju Jose }
1442588ca944SShiju Jose CXL_SPARING_GET_ATTR(persist_mode, bool)
1443588ca944SShiju Jose CXL_SPARING_GET_ATTR(dpa, u64)
1444588ca944SShiju Jose CXL_SPARING_GET_ATTR(nibble_mask, u32)
1445588ca944SShiju Jose CXL_SPARING_GET_ATTR(bank_group, u32)
1446588ca944SShiju Jose CXL_SPARING_GET_ATTR(bank, u32)
1447588ca944SShiju Jose CXL_SPARING_GET_ATTR(rank, u32)
1448588ca944SShiju Jose CXL_SPARING_GET_ATTR(row, u32)
1449588ca944SShiju Jose CXL_SPARING_GET_ATTR(column, u32)
1450588ca944SShiju Jose CXL_SPARING_GET_ATTR(channel, u32)
1451588ca944SShiju Jose CXL_SPARING_GET_ATTR(sub_channel, u32)
1452588ca944SShiju Jose
1453588ca944SShiju Jose #define CXL_SPARING_SET_ATTR(attrb, data_type) \
1454588ca944SShiju Jose static int cxl_mem_sparing_set_##attrb(struct device *dev, \
1455588ca944SShiju Jose void *drv_data, data_type val) \
1456588ca944SShiju Jose { \
1457588ca944SShiju Jose struct cxl_mem_sparing_context *ctx = drv_data; \
1458588ca944SShiju Jose \
1459588ca944SShiju Jose ctx->attrb = val; \
1460588ca944SShiju Jose \
1461588ca944SShiju Jose return 0; \
1462588ca944SShiju Jose }
1463588ca944SShiju Jose CXL_SPARING_SET_ATTR(nibble_mask, u32)
1464588ca944SShiju Jose CXL_SPARING_SET_ATTR(bank_group, u32)
1465588ca944SShiju Jose CXL_SPARING_SET_ATTR(bank, u32)
1466588ca944SShiju Jose CXL_SPARING_SET_ATTR(rank, u32)
1467588ca944SShiju Jose CXL_SPARING_SET_ATTR(row, u32)
1468588ca944SShiju Jose CXL_SPARING_SET_ATTR(column, u32)
1469588ca944SShiju Jose CXL_SPARING_SET_ATTR(channel, u32)
1470588ca944SShiju Jose CXL_SPARING_SET_ATTR(sub_channel, u32)
1471588ca944SShiju Jose
1472588ca944SShiju Jose static int cxl_mem_sparing_set_persist_mode(struct device *dev, void *drv_data,
1473588ca944SShiju Jose bool persist_mode)
1474588ca944SShiju Jose {
1475588ca944SShiju Jose struct cxl_mem_sparing_context *ctx = drv_data;
1476588ca944SShiju Jose
1477588ca944SShiju Jose if ((persist_mode && ctx->cap_hard_sparing) ||
1478588ca944SShiju Jose (!persist_mode && ctx->cap_soft_sparing))
1479588ca944SShiju Jose ctx->persist_mode = persist_mode;
1480588ca944SShiju Jose else
1481588ca944SShiju Jose return -EOPNOTSUPP;
1482588ca944SShiju Jose
1483588ca944SShiju Jose return 0;
1484588ca944SShiju Jose }
1485588ca944SShiju Jose
1486588ca944SShiju Jose static int cxl_get_mem_sparing_safe_when_in_use(struct device *dev,
1487588ca944SShiju Jose void *drv_data, bool *safe)
1488588ca944SShiju Jose {
1489588ca944SShiju Jose struct cxl_mem_sparing_context *ctx = drv_data;
1490588ca944SShiju Jose
1491588ca944SShiju Jose *safe = ctx->cap_safe_when_in_use;
1492588ca944SShiju Jose
1493588ca944SShiju Jose return 0;
1494588ca944SShiju Jose }
1495588ca944SShiju Jose
1496588ca944SShiju Jose static int cxl_mem_sparing_get_min_dpa(struct device *dev, void *drv_data,
1497588ca944SShiju Jose u64 *min_dpa)
1498588ca944SShiju Jose {
1499588ca944SShiju Jose struct cxl_mem_sparing_context *ctx = drv_data;
1500588ca944SShiju Jose struct cxl_memdev *cxlmd = ctx->cxlmd;
1501588ca944SShiju Jose struct cxl_dev_state *cxlds = cxlmd->cxlds;
1502588ca944SShiju Jose
1503588ca944SShiju Jose *min_dpa = cxlds->dpa_res.start;
1504588ca944SShiju Jose
1505588ca944SShiju Jose return 0;
1506588ca944SShiju Jose }
1507588ca944SShiju Jose
1508588ca944SShiju Jose static int cxl_mem_sparing_get_max_dpa(struct device *dev, void *drv_data,
1509588ca944SShiju Jose u64 *max_dpa)
1510588ca944SShiju Jose {
1511588ca944SShiju Jose struct cxl_mem_sparing_context *ctx = drv_data;
1512588ca944SShiju Jose struct cxl_memdev *cxlmd = ctx->cxlmd;
1513588ca944SShiju Jose struct cxl_dev_state *cxlds = cxlmd->cxlds;
1514588ca944SShiju Jose
1515588ca944SShiju Jose *max_dpa = cxlds->dpa_res.end;
1516588ca944SShiju Jose
1517588ca944SShiju Jose return 0;
1518588ca944SShiju Jose }
1519588ca944SShiju Jose
1520588ca944SShiju Jose static int cxl_mem_sparing_set_dpa(struct device *dev, void *drv_data, u64 dpa)
1521588ca944SShiju Jose {
1522588ca944SShiju Jose struct cxl_mem_sparing_context *ctx = drv_data;
1523588ca944SShiju Jose struct cxl_memdev *cxlmd = ctx->cxlmd;
1524588ca944SShiju Jose struct cxl_dev_state *cxlds = cxlmd->cxlds;
1525588ca944SShiju Jose
1526588ca944SShiju Jose if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end)
1527588ca944SShiju Jose return -EINVAL;
1528588ca944SShiju Jose
1529588ca944SShiju Jose ctx->dpa = dpa;
1530588ca944SShiju Jose
1531588ca944SShiju Jose return 0;
1532588ca944SShiju Jose }
1533588ca944SShiju Jose
1534588ca944SShiju Jose static int cxl_do_mem_sparing(struct device *dev, void *drv_data, u32 val)
1535588ca944SShiju Jose {
1536588ca944SShiju Jose struct cxl_mem_sparing_context *ctx = drv_data;
1537588ca944SShiju Jose
1538588ca944SShiju Jose if (val != EDAC_DO_MEM_REPAIR)
1539588ca944SShiju Jose return -EINVAL;
1540588ca944SShiju Jose
1541588ca944SShiju Jose return cxl_mem_perform_sparing(dev, ctx);
1542588ca944SShiju Jose }
1543588ca944SShiju Jose
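/*
 * Each sparing granularity exposes a superset of the coarser one's
 * controls: RANK_OPS is the base set, BANK_OPS adds bank group/bank,
 * ROW_OPS adds row, and CACHELINE_OPS adds column and sub-channel.
 */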
1544588ca944SShiju Jose #define RANK_OPS \
1545588ca944SShiju Jose .get_repair_type = cxl_mem_sparing_get_repair_type, \
1546588ca944SShiju Jose .get_persist_mode = cxl_mem_sparing_get_persist_mode, \
1547588ca944SShiju Jose .set_persist_mode = cxl_mem_sparing_set_persist_mode, \
1548588ca944SShiju Jose .get_repair_safe_when_in_use = cxl_get_mem_sparing_safe_when_in_use, \
1549588ca944SShiju Jose .get_min_dpa = cxl_mem_sparing_get_min_dpa, \
1550588ca944SShiju Jose .get_max_dpa = cxl_mem_sparing_get_max_dpa, \
1551588ca944SShiju Jose .get_dpa = cxl_mem_sparing_get_dpa, \
1552588ca944SShiju Jose .set_dpa = cxl_mem_sparing_set_dpa, \
1553588ca944SShiju Jose .get_nibble_mask = cxl_mem_sparing_get_nibble_mask, \
1554588ca944SShiju Jose .set_nibble_mask = cxl_mem_sparing_set_nibble_mask, \
1555588ca944SShiju Jose .get_rank = cxl_mem_sparing_get_rank, \
1556588ca944SShiju Jose .set_rank = cxl_mem_sparing_set_rank, \
1557588ca944SShiju Jose .get_channel = cxl_mem_sparing_get_channel, \
1558588ca944SShiju Jose .set_channel = cxl_mem_sparing_set_channel, \
1559588ca944SShiju Jose .do_repair = cxl_do_mem_sparing
1560588ca944SShiju Jose
1561588ca944SShiju Jose #define BANK_OPS \
1562588ca944SShiju Jose RANK_OPS, .get_bank_group = cxl_mem_sparing_get_bank_group, \
1563588ca944SShiju Jose .set_bank_group = cxl_mem_sparing_set_bank_group, \
1564588ca944SShiju Jose .get_bank = cxl_mem_sparing_get_bank, \
1565588ca944SShiju Jose .set_bank = cxl_mem_sparing_set_bank
1566588ca944SShiju Jose
1567588ca944SShiju Jose #define ROW_OPS \
1568588ca944SShiju Jose BANK_OPS, .get_row = cxl_mem_sparing_get_row, \
1569588ca944SShiju Jose .set_row = cxl_mem_sparing_set_row
1570588ca944SShiju Jose
1571588ca944SShiju Jose #define CACHELINE_OPS \
1572588ca944SShiju Jose ROW_OPS, .get_column = cxl_mem_sparing_get_column, \
1573588ca944SShiju Jose .set_column = cxl_mem_sparing_set_column, \
1574588ca944SShiju Jose .get_sub_channel = cxl_mem_sparing_get_sub_channel, \
1575588ca944SShiju Jose .set_sub_channel = cxl_mem_sparing_set_sub_channel
1576588ca944SShiju Jose
1577588ca944SShiju Jose static const struct edac_mem_repair_ops cxl_rank_sparing_ops = {
1578588ca944SShiju Jose RANK_OPS,
1579588ca944SShiju Jose };
1580588ca944SShiju Jose
1581588ca944SShiju Jose static const struct edac_mem_repair_ops cxl_bank_sparing_ops = {
1582588ca944SShiju Jose BANK_OPS,
1583588ca944SShiju Jose };
1584588ca944SShiju Jose
1585588ca944SShiju Jose static const struct edac_mem_repair_ops cxl_row_sparing_ops = {
1586588ca944SShiju Jose ROW_OPS,
1587588ca944SShiju Jose };
1588588ca944SShiju Jose
1589588ca944SShiju Jose static const struct edac_mem_repair_ops cxl_cacheline_sparing_ops = {
1590588ca944SShiju Jose CACHELINE_OPS,
1591588ca944SShiju Jose };
1592588ca944SShiju Jose
1593588ca944SShiju Jose struct cxl_mem_sparing_desc {
1594588ca944SShiju Jose const uuid_t repair_uuid;
1595588ca944SShiju Jose enum edac_mem_repair_type repair_type;
1596588ca944SShiju Jose const struct edac_mem_repair_ops *repair_ops;
1597588ca944SShiju Jose };
1598588ca944SShiju Jose
1599588ca944SShiju Jose static const struct cxl_mem_sparing_desc mem_sparing_desc[] = {
1600588ca944SShiju Jose {
1601588ca944SShiju Jose .repair_uuid = CXL_FEAT_CACHELINE_SPARING_UUID,
1602588ca944SShiju Jose .repair_type = EDAC_REPAIR_CACHELINE_SPARING,
1603588ca944SShiju Jose .repair_ops = &cxl_cacheline_sparing_ops,
1604588ca944SShiju Jose },
1605588ca944SShiju Jose {
1606588ca944SShiju Jose .repair_uuid = CXL_FEAT_ROW_SPARING_UUID,
1607588ca944SShiju Jose .repair_type = EDAC_REPAIR_ROW_SPARING,
1608588ca944SShiju Jose .repair_ops = &cxl_row_sparing_ops,
1609588ca944SShiju Jose },
1610588ca944SShiju Jose {
1611588ca944SShiju Jose .repair_uuid = CXL_FEAT_BANK_SPARING_UUID,
1612588ca944SShiju Jose .repair_type = EDAC_REPAIR_BANK_SPARING,
1613588ca944SShiju Jose .repair_ops = &cxl_bank_sparing_ops,
1614588ca944SShiju Jose },
1615588ca944SShiju Jose {
1616588ca944SShiju Jose .repair_uuid = CXL_FEAT_RANK_SPARING_UUID,
1617588ca944SShiju Jose .repair_type = EDAC_REPAIR_RANK_SPARING,
1618588ca944SShiju Jose .repair_ops = &cxl_rank_sparing_ops,
1619588ca944SShiju Jose },
1620588ca944SShiju Jose };
1621588ca944SShiju Jose
1622588ca944SShiju Jose static int cxl_memdev_sparing_init(struct cxl_memdev *cxlmd,
1623588ca944SShiju Jose struct edac_dev_feature *ras_feature,
1624588ca944SShiju Jose const struct cxl_mem_sparing_desc *desc,
1625588ca944SShiju Jose u8 repair_inst)
1626588ca944SShiju Jose {
1627588ca944SShiju Jose struct cxl_mem_sparing_context *cxl_sparing_ctx;
1628588ca944SShiju Jose struct cxl_feat_entry *feat_entry;
1629588ca944SShiju Jose int ret;
1630588ca944SShiju Jose
1631588ca944SShiju Jose feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
1632588ca944SShiju Jose &desc->repair_uuid);
1633588ca944SShiju Jose if (IS_ERR(feat_entry))
1634588ca944SShiju Jose return -EOPNOTSUPP;
1635588ca944SShiju Jose
1636588ca944SShiju Jose if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
1637588ca944SShiju Jose return -EOPNOTSUPP;
1638588ca944SShiju Jose
1639588ca944SShiju Jose cxl_sparing_ctx = devm_kzalloc(&cxlmd->dev, sizeof(*cxl_sparing_ctx),
1640588ca944SShiju Jose GFP_KERNEL);
1641588ca944SShiju Jose if (!cxl_sparing_ctx)
1642588ca944SShiju Jose return -ENOMEM;
1643588ca944SShiju Jose
1644588ca944SShiju Jose *cxl_sparing_ctx = (struct cxl_mem_sparing_context){
1645588ca944SShiju Jose .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
1646588ca944SShiju Jose .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
1647588ca944SShiju Jose .get_version = feat_entry->get_feat_ver,
1648588ca944SShiju Jose .set_version = feat_entry->set_feat_ver,
1649588ca944SShiju Jose .effects = le16_to_cpu(feat_entry->effects),
1650588ca944SShiju Jose .cxlmd = cxlmd,
1651588ca944SShiju Jose .repair_type = desc->repair_type,
1652588ca944SShiju Jose .instance = repair_inst++,
1653588ca944SShiju Jose };
1654588ca944SShiju Jose uuid_copy(&cxl_sparing_ctx->repair_uuid, &desc->repair_uuid);
1655588ca944SShiju Jose
1656588ca944SShiju Jose ret = cxl_mem_sparing_get_attrbs(cxl_sparing_ctx);
1657588ca944SShiju Jose if (ret)
1658588ca944SShiju Jose return ret;
1659588ca944SShiju Jose
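/*
 * Default to soft (non-persistent) sparing whenever the device supports
 * it; fall back to hard sparing only if that is the sole capability.
 */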
1660588ca944SShiju Jose if ((cxl_sparing_ctx->cap_soft_sparing &&
1661588ca944SShiju Jose cxl_sparing_ctx->cap_hard_sparing) ||
1662588ca944SShiju Jose cxl_sparing_ctx->cap_soft_sparing)
1663588ca944SShiju Jose cxl_sparing_ctx->persist_mode = 0;
1664588ca944SShiju Jose else if (cxl_sparing_ctx->cap_hard_sparing)
1665588ca944SShiju Jose cxl_sparing_ctx->persist_mode = 1;
1666588ca944SShiju Jose else
1667588ca944SShiju Jose return -EOPNOTSUPP;
1668588ca944SShiju Jose
1669588ca944SShiju Jose ras_feature->ft_type = RAS_FEAT_MEM_REPAIR;
1670588ca944SShiju Jose ras_feature->instance = cxl_sparing_ctx->instance;
1671588ca944SShiju Jose ras_feature->mem_repair_ops = desc->repair_ops;
1672588ca944SShiju Jose ras_feature->ctx = cxl_sparing_ctx;
1673588ca944SShiju Jose
1674588ca944SShiju Jose return 0;
1675588ca944SShiju Jose }
1676588ca944SShiju Jose
1677be9b359eSShiju Jose /*
1678be9b359eSShiju Jose * CXL memory soft PPR & hard PPR control
1679be9b359eSShiju Jose */
1680be9b359eSShiju Jose struct cxl_ppr_context {
1681be9b359eSShiju Jose uuid_t repair_uuid;
1682be9b359eSShiju Jose u8 instance;
1683be9b359eSShiju Jose u16 get_feat_size;
1684be9b359eSShiju Jose u16 set_feat_size;
1685be9b359eSShiju Jose u8 get_version;
1686be9b359eSShiju Jose u8 set_version;
1687be9b359eSShiju Jose u16 effects;
1688be9b359eSShiju Jose u8 op_class;
1689be9b359eSShiju Jose u8 op_subclass;
1690be9b359eSShiju Jose bool cap_dpa;
1691be9b359eSShiju Jose bool cap_nib_mask;
1692be9b359eSShiju Jose bool media_accessible;
1693be9b359eSShiju Jose bool data_retained;
1694be9b359eSShiju Jose struct cxl_memdev *cxlmd;
1695be9b359eSShiju Jose enum edac_mem_repair_type repair_type;
1696be9b359eSShiju Jose bool persist_mode;
1697be9b359eSShiju Jose u64 dpa;
1698be9b359eSShiju Jose u32 nibble_mask;
1699be9b359eSShiju Jose };
1700be9b359eSShiju Jose
1701be9b359eSShiju Jose /*
1702be9b359eSShiju Jose * See CXL rev 3.2 @8.2.10.7.2.1 Table 8-128 sPPR Feature Readable Attributes
1703be9b359eSShiju Jose *
1704be9b359eSShiju Jose * See CXL rev 3.2 @8.2.10.7.2.2 Table 8-131 hPPR Feature Readable Attributes
1705be9b359eSShiju Jose */
1706be9b359eSShiju Jose
1707be9b359eSShiju Jose #define CXL_PPR_OP_CAP_DEVICE_INITIATED BIT(0)
1708be9b359eSShiju Jose #define CXL_PPR_OP_MODE_DEV_INITIATED BIT(0)
1709be9b359eSShiju Jose
1710be9b359eSShiju Jose #define CXL_PPR_FLAG_DPA_SUPPORT_MASK BIT(0)
1711be9b359eSShiju Jose #define CXL_PPR_FLAG_NIB_SUPPORT_MASK BIT(1)
1712be9b359eSShiju Jose #define CXL_PPR_FLAG_MEM_SPARING_EV_REC_SUPPORT_MASK BIT(2)
1713be9b359eSShiju Jose #define CXL_PPR_FLAG_DEV_INITED_PPR_AT_BOOT_CAP_MASK BIT(3)
1714be9b359eSShiju Jose
1715be9b359eSShiju Jose #define CXL_PPR_RESTRICTION_FLAG_MEDIA_ACCESSIBLE_MASK BIT(0)
1716be9b359eSShiju Jose #define CXL_PPR_RESTRICTION_FLAG_DATA_RETAINED_MASK BIT(2)
1717be9b359eSShiju Jose
1718be9b359eSShiju Jose #define CXL_PPR_SPARING_EV_REC_EN_MASK BIT(0)
1719be9b359eSShiju Jose #define CXL_PPR_DEV_INITED_PPR_AT_BOOT_EN_MASK BIT(1)
1720be9b359eSShiju Jose
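/*
 * As with memory sparing, the restriction flags are inverted so that
 * media_accessible and data_retained read as positive capabilities.
 */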
1721be9b359eSShiju Jose #define CXL_PPR_GET_CAP_DPA(flags) \
1722be9b359eSShiju Jose FIELD_GET(CXL_PPR_FLAG_DPA_SUPPORT_MASK, flags)
1723be9b359eSShiju Jose #define CXL_PPR_GET_CAP_NIB_MASK(flags) \
1724be9b359eSShiju Jose FIELD_GET(CXL_PPR_FLAG_NIB_SUPPORT_MASK, flags)
1725be9b359eSShiju Jose #define CXL_PPR_GET_MEDIA_ACCESSIBLE(restriction_flags) \
1726be9b359eSShiju Jose (FIELD_GET(CXL_PPR_RESTRICTION_FLAG_MEDIA_ACCESSIBLE_MASK, \
1727be9b359eSShiju Jose restriction_flags) ^ 1)
1728be9b359eSShiju Jose #define CXL_PPR_GET_DATA_RETAINED(restriction_flags) \
1729be9b359eSShiju Jose (FIELD_GET(CXL_PPR_RESTRICTION_FLAG_DATA_RETAINED_MASK, \
1730be9b359eSShiju Jose restriction_flags) ^ 1)
1731be9b359eSShiju Jose
1732be9b359eSShiju Jose struct cxl_memdev_ppr_rd_attrbs {
1733be9b359eSShiju Jose struct cxl_memdev_repair_rd_attrbs_hdr hdr;
1734be9b359eSShiju Jose u8 ppr_flags;
1735be9b359eSShiju Jose __le16 restriction_flags;
1736be9b359eSShiju Jose u8 ppr_op_mode;
1737be9b359eSShiju Jose } __packed;
1738be9b359eSShiju Jose
1739be9b359eSShiju Jose /*
1740be9b359eSShiju Jose * See CXL rev 3.2 @8.2.10.7.1.2 Table 8-118 sPPR Maintenance Input Payload
1741be9b359eSShiju Jose *
1742be9b359eSShiju Jose * See CXL rev 3.2 @8.2.10.7.1.3 Table 8-119 hPPR Maintenance Input Payload
1743be9b359eSShiju Jose */
1744be9b359eSShiju Jose struct cxl_memdev_ppr_maintenance_attrbs {
1745be9b359eSShiju Jose u8 flags;
1746be9b359eSShiju Jose __le64 dpa;
1747be9b359eSShiju Jose u8 nibble_mask[3];
1748be9b359eSShiju Jose } __packed;
1749be9b359eSShiju Jose
1750be9b359eSShiju Jose static int cxl_mem_ppr_get_attrbs(struct cxl_ppr_context *cxl_ppr_ctx)
1751be9b359eSShiju Jose {
1752be9b359eSShiju Jose size_t rd_data_size = sizeof(struct cxl_memdev_ppr_rd_attrbs);
1753be9b359eSShiju Jose struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
1754be9b359eSShiju Jose struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
1755be9b359eSShiju Jose u16 restriction_flags;
1756be9b359eSShiju Jose size_t data_size;
1757be9b359eSShiju Jose u16 return_code;
1758be9b359eSShiju Jose
1759be9b359eSShiju Jose struct cxl_memdev_ppr_rd_attrbs *rd_attrbs __free(kfree) =
1760be9b359eSShiju Jose kmalloc(rd_data_size, GFP_KERNEL);
1761be9b359eSShiju Jose if (!rd_attrbs)
1762be9b359eSShiju Jose return -ENOMEM;
1763be9b359eSShiju Jose
1764be9b359eSShiju Jose data_size = cxl_get_feature(cxl_mbox, &cxl_ppr_ctx->repair_uuid,
1765be9b359eSShiju Jose CXL_GET_FEAT_SEL_CURRENT_VALUE, rd_attrbs,
1766be9b359eSShiju Jose rd_data_size, 0, &return_code);
1767be9b359eSShiju Jose if (!data_size)
1768be9b359eSShiju Jose return -EIO;
1769be9b359eSShiju Jose
1770be9b359eSShiju Jose cxl_ppr_ctx->op_class = rd_attrbs->hdr.op_class;
1771be9b359eSShiju Jose cxl_ppr_ctx->op_subclass = rd_attrbs->hdr.op_subclass;
1772be9b359eSShiju Jose cxl_ppr_ctx->cap_dpa = CXL_PPR_GET_CAP_DPA(rd_attrbs->ppr_flags);
1773be9b359eSShiju Jose cxl_ppr_ctx->cap_nib_mask =
1774be9b359eSShiju Jose CXL_PPR_GET_CAP_NIB_MASK(rd_attrbs->ppr_flags);
1775be9b359eSShiju Jose
1776be9b359eSShiju Jose restriction_flags = le16_to_cpu(rd_attrbs->restriction_flags);
1777be9b359eSShiju Jose cxl_ppr_ctx->media_accessible =
1778be9b359eSShiju Jose CXL_PPR_GET_MEDIA_ACCESSIBLE(restriction_flags);
1779be9b359eSShiju Jose cxl_ppr_ctx->data_retained =
1780be9b359eSShiju Jose CXL_PPR_GET_DATA_RETAINED(restriction_flags);
1781be9b359eSShiju Jose
1782be9b359eSShiju Jose return 0;
1783be9b359eSShiju Jose }
1784be9b359eSShiju Jose
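/*
 * PPR may target online memory only when the device reports that the
 * media stays accessible and data is retained across the operation;
 * otherwise the memory to repair must be offline, mirroring the memory
 * sparing path above.
 */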
1785be9b359eSShiju Jose static int cxl_mem_perform_ppr(struct cxl_ppr_context *cxl_ppr_ctx)
1786be9b359eSShiju Jose {
1787be9b359eSShiju Jose struct cxl_memdev_ppr_maintenance_attrbs maintenance_attrbs;
1788be9b359eSShiju Jose struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
1789be9b359eSShiju Jose struct cxl_mem_repair_attrbs attrbs = { 0 };
1790be9b359eSShiju Jose
1791be9b359eSShiju Jose struct rw_semaphore *region_lock __free(rwsem_read_release) =
1792be9b359eSShiju Jose rwsem_read_intr_acquire(&cxl_region_rwsem);
1793be9b359eSShiju Jose if (!region_lock)
1794be9b359eSShiju Jose return -EINTR;
1795be9b359eSShiju Jose
1796be9b359eSShiju Jose struct rw_semaphore *dpa_lock __free(rwsem_read_release) =
1797be9b359eSShiju Jose rwsem_read_intr_acquire(&cxl_dpa_rwsem);
1798be9b359eSShiju Jose if (!dpa_lock)
1799be9b359eSShiju Jose return -EINTR;
1800be9b359eSShiju Jose
1801be9b359eSShiju Jose if (!cxl_ppr_ctx->media_accessible || !cxl_ppr_ctx->data_retained) {
1802be9b359eSShiju Jose /* Memory to repair must be offline */
1803be9b359eSShiju Jose if (cxl_is_memdev_memory_online(cxlmd))
1804be9b359eSShiju Jose return -EBUSY;
1805be9b359eSShiju Jose } else {
1806be9b359eSShiju Jose if (cxl_is_memdev_memory_online(cxlmd)) {
1807be9b359eSShiju Jose /* Check memory to repair is from the current boot */
1808be9b359eSShiju Jose attrbs.repair_type = CXL_PPR;
1809be9b359eSShiju Jose attrbs.dpa = cxl_ppr_ctx->dpa;
1810be9b359eSShiju Jose attrbs.nibble_mask = cxl_ppr_ctx->nibble_mask;
1811be9b359eSShiju Jose if (!cxl_find_rec_dram(cxlmd, &attrbs) &&
1812be9b359eSShiju Jose !cxl_find_rec_gen_media(cxlmd, &attrbs))
1813be9b359eSShiju Jose return -EINVAL;
1814be9b359eSShiju Jose }
1815be9b359eSShiju Jose }
1816be9b359eSShiju Jose
1817be9b359eSShiju Jose memset(&maintenance_attrbs, 0, sizeof(maintenance_attrbs));
1818be9b359eSShiju Jose maintenance_attrbs.flags = 0;
1819be9b359eSShiju Jose maintenance_attrbs.dpa = cpu_to_le64(cxl_ppr_ctx->dpa);
1820be9b359eSShiju Jose put_unaligned_le24(cxl_ppr_ctx->nibble_mask,
1821be9b359eSShiju Jose maintenance_attrbs.nibble_mask);
1822be9b359eSShiju Jose
1823be9b359eSShiju Jose return cxl_perform_maintenance(&cxlmd->cxlds->cxl_mbox,
1824be9b359eSShiju Jose cxl_ppr_ctx->op_class,
1825be9b359eSShiju Jose cxl_ppr_ctx->op_subclass,
1826be9b359eSShiju Jose &maintenance_attrbs,
1827be9b359eSShiju Jose sizeof(maintenance_attrbs));
1828be9b359eSShiju Jose }
1829be9b359eSShiju Jose
1830be9b359eSShiju Jose static int cxl_ppr_get_repair_type(struct device *dev, void *drv_data,
1831be9b359eSShiju Jose const char **repair_type)
1832be9b359eSShiju Jose {
1833be9b359eSShiju Jose *repair_type = edac_repair_type[EDAC_REPAIR_PPR];
1834be9b359eSShiju Jose
1835be9b359eSShiju Jose return 0;
1836be9b359eSShiju Jose }
1837be9b359eSShiju Jose
1838be9b359eSShiju Jose static int cxl_ppr_get_persist_mode(struct device *dev, void *drv_data,
1839be9b359eSShiju Jose bool *persist_mode)
1840be9b359eSShiju Jose {
1841be9b359eSShiju Jose struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
1842be9b359eSShiju Jose
1843be9b359eSShiju Jose *persist_mode = cxl_ppr_ctx->persist_mode;
1844be9b359eSShiju Jose
1845be9b359eSShiju Jose return 0;
1846be9b359eSShiju Jose }
1847be9b359eSShiju Jose
1848be9b359eSShiju Jose static int cxl_get_ppr_safe_when_in_use(struct device *dev, void *drv_data,
1849be9b359eSShiju Jose bool *safe)
1850be9b359eSShiju Jose {
1851be9b359eSShiju Jose struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
1852be9b359eSShiju Jose
1853be9b359eSShiju Jose *safe = cxl_ppr_ctx->media_accessible & cxl_ppr_ctx->data_retained;
1854be9b359eSShiju Jose
1855be9b359eSShiju Jose return 0;
1856be9b359eSShiju Jose }
1857be9b359eSShiju Jose
1858be9b359eSShiju Jose static int cxl_ppr_get_min_dpa(struct device *dev, void *drv_data, u64 *min_dpa)
1859be9b359eSShiju Jose {
1860be9b359eSShiju Jose struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
1861be9b359eSShiju Jose struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
1862be9b359eSShiju Jose struct cxl_dev_state *cxlds = cxlmd->cxlds;
1863be9b359eSShiju Jose
1864be9b359eSShiju Jose *min_dpa = cxlds->dpa_res.start;
1865be9b359eSShiju Jose
1866be9b359eSShiju Jose return 0;
1867be9b359eSShiju Jose }
1868be9b359eSShiju Jose
1869be9b359eSShiju Jose static int cxl_ppr_get_max_dpa(struct device *dev, void *drv_data, u64 *max_dpa)
1870be9b359eSShiju Jose {
1871be9b359eSShiju Jose struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
1872be9b359eSShiju Jose struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
1873be9b359eSShiju Jose struct cxl_dev_state *cxlds = cxlmd->cxlds;
1874be9b359eSShiju Jose
1875be9b359eSShiju Jose *max_dpa = cxlds->dpa_res.end;
1876be9b359eSShiju Jose
1877be9b359eSShiju Jose return 0;
1878be9b359eSShiju Jose }
1879be9b359eSShiju Jose
1880be9b359eSShiju Jose static int cxl_ppr_get_dpa(struct device *dev, void *drv_data, u64 *dpa)
1881be9b359eSShiju Jose {
1882be9b359eSShiju Jose struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
1883be9b359eSShiju Jose
1884be9b359eSShiju Jose *dpa = cxl_ppr_ctx->dpa;
1885be9b359eSShiju Jose
1886be9b359eSShiju Jose return 0;
1887be9b359eSShiju Jose }
1888be9b359eSShiju Jose
1889be9b359eSShiju Jose static int cxl_ppr_set_dpa(struct device *dev, void *drv_data, u64 dpa)
1890be9b359eSShiju Jose {
1891be9b359eSShiju Jose struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
1892be9b359eSShiju Jose struct cxl_memdev *cxlmd = cxl_ppr_ctx->cxlmd;
1893be9b359eSShiju Jose struct cxl_dev_state *cxlds = cxlmd->cxlds;
1894be9b359eSShiju Jose
1895be9b359eSShiju Jose if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end)
1896be9b359eSShiju Jose return -EINVAL;
1897be9b359eSShiju Jose
1898be9b359eSShiju Jose cxl_ppr_ctx->dpa = dpa;
1899be9b359eSShiju Jose
1900be9b359eSShiju Jose return 0;
1901be9b359eSShiju Jose }
1902be9b359eSShiju Jose
1903be9b359eSShiju Jose static int cxl_ppr_get_nibble_mask(struct device *dev, void *drv_data,
1904be9b359eSShiju Jose u32 *nibble_mask)
1905be9b359eSShiju Jose {
1906be9b359eSShiju Jose struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
1907be9b359eSShiju Jose
1908be9b359eSShiju Jose *nibble_mask = cxl_ppr_ctx->nibble_mask;
1909be9b359eSShiju Jose
1910be9b359eSShiju Jose return 0;
1911be9b359eSShiju Jose }
1912be9b359eSShiju Jose
1913be9b359eSShiju Jose static int cxl_ppr_set_nibble_mask(struct device *dev, void *drv_data,
1914be9b359eSShiju Jose u32 nibble_mask)
1915be9b359eSShiju Jose {
1916be9b359eSShiju Jose struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
1917be9b359eSShiju Jose
1918be9b359eSShiju Jose cxl_ppr_ctx->nibble_mask = nibble_mask;
1919be9b359eSShiju Jose
1920be9b359eSShiju Jose return 0;
1921be9b359eSShiju Jose }
1922be9b359eSShiju Jose
1923be9b359eSShiju Jose static int cxl_do_ppr(struct device *dev, void *drv_data, u32 val)
1924be9b359eSShiju Jose {
1925be9b359eSShiju Jose struct cxl_ppr_context *cxl_ppr_ctx = drv_data;
1926be9b359eSShiju Jose
1927be9b359eSShiju Jose if (!cxl_ppr_ctx->dpa || val != EDAC_DO_MEM_REPAIR)
1928be9b359eSShiju Jose return -EINVAL;
1929be9b359eSShiju Jose
1930be9b359eSShiju Jose return cxl_mem_perform_ppr(cxl_ppr_ctx);
1931be9b359eSShiju Jose }
1932be9b359eSShiju Jose
1933be9b359eSShiju Jose static const struct edac_mem_repair_ops cxl_sppr_ops = {
1934be9b359eSShiju Jose .get_repair_type = cxl_ppr_get_repair_type,
1935be9b359eSShiju Jose .get_persist_mode = cxl_ppr_get_persist_mode,
1936be9b359eSShiju Jose .get_repair_safe_when_in_use = cxl_get_ppr_safe_when_in_use,
1937be9b359eSShiju Jose .get_min_dpa = cxl_ppr_get_min_dpa,
1938be9b359eSShiju Jose .get_max_dpa = cxl_ppr_get_max_dpa,
1939be9b359eSShiju Jose .get_dpa = cxl_ppr_get_dpa,
1940be9b359eSShiju Jose .set_dpa = cxl_ppr_set_dpa,
1941be9b359eSShiju Jose .get_nibble_mask = cxl_ppr_get_nibble_mask,
1942be9b359eSShiju Jose .set_nibble_mask = cxl_ppr_set_nibble_mask,
1943be9b359eSShiju Jose .do_repair = cxl_do_ppr,
1944be9b359eSShiju Jose };
1945be9b359eSShiju Jose
1946be9b359eSShiju Jose static int cxl_memdev_soft_ppr_init(struct cxl_memdev *cxlmd,
1947be9b359eSShiju Jose struct edac_dev_feature *ras_feature,
1948be9b359eSShiju Jose u8 repair_inst)
1949be9b359eSShiju Jose {
1950be9b359eSShiju Jose struct cxl_ppr_context *cxl_sppr_ctx;
1951be9b359eSShiju Jose struct cxl_feat_entry *feat_entry;
1952be9b359eSShiju Jose int ret;
1953be9b359eSShiju Jose
1954be9b359eSShiju Jose feat_entry = cxl_feature_info(to_cxlfs(cxlmd->cxlds),
1955be9b359eSShiju Jose &CXL_FEAT_SPPR_UUID);
1956be9b359eSShiju Jose if (IS_ERR(feat_entry))
1957be9b359eSShiju Jose return -EOPNOTSUPP;
1958be9b359eSShiju Jose
1959be9b359eSShiju Jose if (!(le32_to_cpu(feat_entry->flags) & CXL_FEATURE_F_CHANGEABLE))
1960be9b359eSShiju Jose return -EOPNOTSUPP;
1961be9b359eSShiju Jose
1962be9b359eSShiju Jose cxl_sppr_ctx =
1963be9b359eSShiju Jose devm_kzalloc(&cxlmd->dev, sizeof(*cxl_sppr_ctx), GFP_KERNEL);
1964be9b359eSShiju Jose if (!cxl_sppr_ctx)
1965be9b359eSShiju Jose return -ENOMEM;
1966be9b359eSShiju Jose
1967be9b359eSShiju Jose *cxl_sppr_ctx = (struct cxl_ppr_context){
1968be9b359eSShiju Jose .get_feat_size = le16_to_cpu(feat_entry->get_feat_size),
1969be9b359eSShiju Jose .set_feat_size = le16_to_cpu(feat_entry->set_feat_size),
1970be9b359eSShiju Jose .get_version = feat_entry->get_feat_ver,
1971be9b359eSShiju Jose .set_version = feat_entry->set_feat_ver,
1972be9b359eSShiju Jose .effects = le16_to_cpu(feat_entry->effects),
1973be9b359eSShiju Jose .cxlmd = cxlmd,
1974be9b359eSShiju Jose .repair_type = EDAC_REPAIR_PPR,
1975be9b359eSShiju Jose .persist_mode = 0,
1976be9b359eSShiju Jose .instance = repair_inst,
1977be9b359eSShiju Jose };
1978be9b359eSShiju Jose uuid_copy(&cxl_sppr_ctx->repair_uuid, &CXL_FEAT_SPPR_UUID);
1979be9b359eSShiju Jose
1980be9b359eSShiju Jose ret = cxl_mem_ppr_get_attrbs(cxl_sppr_ctx);
1981be9b359eSShiju Jose if (ret)
1982be9b359eSShiju Jose return ret;
1983be9b359eSShiju Jose
1984be9b359eSShiju Jose ras_feature->ft_type = RAS_FEAT_MEM_REPAIR;
1985be9b359eSShiju Jose ras_feature->instance = cxl_sppr_ctx->instance;
1986be9b359eSShiju Jose ras_feature->mem_repair_ops = &cxl_sppr_ops;
1987be9b359eSShiju Jose ras_feature->ctx = cxl_sppr_ctx;
1988be9b359eSShiju Jose
1989be9b359eSShiju Jose return 0;
1990be9b359eSShiju Jose }
1991be9b359eSShiju Jose
19920c6e6f13SShiju Jose int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd)
19930c6e6f13SShiju Jose {
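/*
 * Collect up to CXL_NR_EDAC_DEV_FEATURES entries: patrol scrub, ECS,
 * the four memory sparing granularities and soft PPR.  Features the
 * device does not advertise are simply skipped.
 */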
19940c6e6f13SShiju Jose struct edac_dev_feature ras_features[CXL_NR_EDAC_DEV_FEATURES];
19950c6e6f13SShiju Jose int num_ras_features = 0;
19960b5ccb0dSShiju Jose u8 repair_inst = 0;
19970c6e6f13SShiju Jose int rc;
19980c6e6f13SShiju Jose
19990c6e6f13SShiju Jose if (IS_ENABLED(CONFIG_CXL_EDAC_SCRUB)) {
20000c6e6f13SShiju Jose rc = cxl_memdev_scrub_init(cxlmd, &ras_features[num_ras_features], 0);
20010c6e6f13SShiju Jose if (rc < 0 && rc != -EOPNOTSUPP)
20020c6e6f13SShiju Jose return rc;
20030c6e6f13SShiju Jose
20040c6e6f13SShiju Jose if (rc != -EOPNOTSUPP)
20050c6e6f13SShiju Jose num_ras_features++;
20060c6e6f13SShiju Jose }
20070c6e6f13SShiju Jose
200885fb6a16SShiju Jose if (IS_ENABLED(CONFIG_CXL_EDAC_ECS)) {
200985fb6a16SShiju Jose rc = cxl_memdev_ecs_init(cxlmd, &ras_features[num_ras_features]);
201085fb6a16SShiju Jose if (rc < 0 && rc != -EOPNOTSUPP)
201185fb6a16SShiju Jose return rc;
201285fb6a16SShiju Jose
201385fb6a16SShiju Jose if (rc != -EOPNOTSUPP)
201485fb6a16SShiju Jose num_ras_features++;
201585fb6a16SShiju Jose }
201685fb6a16SShiju Jose
20170b5ccb0dSShiju Jose if (IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR)) {
2018588ca944SShiju Jose for (int i = 0; i < CXL_MEM_SPARING_MAX; i++) {
2019588ca944SShiju Jose rc = cxl_memdev_sparing_init(cxlmd,
2020588ca944SShiju Jose &ras_features[num_ras_features],
2021588ca944SShiju Jose &mem_sparing_desc[i], repair_inst);
2022588ca944SShiju Jose if (rc == -EOPNOTSUPP)
2023588ca944SShiju Jose continue;
2024588ca944SShiju Jose if (rc < 0)
2025588ca944SShiju Jose return rc;
2026588ca944SShiju Jose
2027588ca944SShiju Jose repair_inst++;
2028588ca944SShiju Jose num_ras_features++;
2029588ca944SShiju Jose }
2030588ca944SShiju Jose
2031be9b359eSShiju Jose rc = cxl_memdev_soft_ppr_init(cxlmd, &ras_features[num_ras_features],
2032be9b359eSShiju Jose repair_inst);
2033be9b359eSShiju Jose if (rc < 0 && rc != -EOPNOTSUPP)
2034be9b359eSShiju Jose return rc;
2035be9b359eSShiju Jose
2036be9b359eSShiju Jose if (rc != -EOPNOTSUPP) {
2037be9b359eSShiju Jose repair_inst++;
2038be9b359eSShiju Jose num_ras_features++;
2039be9b359eSShiju Jose }
2040be9b359eSShiju Jose
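/*
 * At least one repair feature registered, so allocate the per-memdev
 * error record cache that backs the repair record lookups.
 */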
20410b5ccb0dSShiju Jose if (repair_inst) {
20420b5ccb0dSShiju Jose struct cxl_mem_err_rec *array_rec =
20430b5ccb0dSShiju Jose devm_kzalloc(&cxlmd->dev, sizeof(*array_rec),
20440b5ccb0dSShiju Jose GFP_KERNEL);
20450b5ccb0dSShiju Jose if (!array_rec)
20460b5ccb0dSShiju Jose return -ENOMEM;
20470b5ccb0dSShiju Jose
20480b5ccb0dSShiju Jose xa_init(&array_rec->rec_gen_media);
20490b5ccb0dSShiju Jose xa_init(&array_rec->rec_dram);
20500b5ccb0dSShiju Jose cxlmd->err_rec_array = array_rec;
20510b5ccb0dSShiju Jose }
20520b5ccb0dSShiju Jose }
20530b5ccb0dSShiju Jose
20540c6e6f13SShiju Jose if (!num_ras_features)
20550c6e6f13SShiju Jose return -EINVAL;
20560c6e6f13SShiju Jose
20570c6e6f13SShiju Jose char *cxl_dev_name __free(kfree) =
20580c6e6f13SShiju Jose kasprintf(GFP_KERNEL, "cxl_%s", dev_name(&cxlmd->dev));
20590c6e6f13SShiju Jose if (!cxl_dev_name)
20600c6e6f13SShiju Jose return -ENOMEM;
20610c6e6f13SShiju Jose
20620c6e6f13SShiju Jose return edac_dev_register(&cxlmd->dev, cxl_dev_name, NULL,
20630c6e6f13SShiju Jose num_ras_features, ras_features);
20640c6e6f13SShiju Jose }
20650c6e6f13SShiju Jose EXPORT_SYMBOL_NS_GPL(devm_cxl_memdev_edac_register, "CXL");
20660c6e6f13SShiju Jose
20670c6e6f13SShiju Jose int devm_cxl_region_edac_register(struct cxl_region *cxlr)
20680c6e6f13SShiju Jose {
20690c6e6f13SShiju Jose struct edac_dev_feature ras_features[CXL_NR_EDAC_DEV_FEATURES];
20700c6e6f13SShiju Jose int num_ras_features = 0;
20710c6e6f13SShiju Jose int rc;
20720c6e6f13SShiju Jose
20730c6e6f13SShiju Jose if (!IS_ENABLED(CONFIG_CXL_EDAC_SCRUB))
20740c6e6f13SShiju Jose return 0;
20750c6e6f13SShiju Jose
20760c6e6f13SShiju Jose rc = cxl_region_scrub_init(cxlr, &ras_features[num_ras_features], 0);
20770c6e6f13SShiju Jose if (rc < 0)
20780c6e6f13SShiju Jose return rc;
20790c6e6f13SShiju Jose
20800c6e6f13SShiju Jose num_ras_features++;
20810c6e6f13SShiju Jose
20820c6e6f13SShiju Jose char *cxl_dev_name __free(kfree) =
20830c6e6f13SShiju Jose kasprintf(GFP_KERNEL, "cxl_%s", dev_name(&cxlr->dev));
20840c6e6f13SShiju Jose if (!cxl_dev_name)
20850c6e6f13SShiju Jose return -ENOMEM;
20860c6e6f13SShiju Jose
20870c6e6f13SShiju Jose return edac_dev_register(&cxlr->dev, cxl_dev_name, NULL,
20880c6e6f13SShiju Jose num_ras_features, ras_features);
20890c6e6f13SShiju Jose }
20900c6e6f13SShiju Jose EXPORT_SYMBOL_NS_GPL(devm_cxl_region_edac_register, "CXL");
20910b5ccb0dSShiju Jose
20920b5ccb0dSShiju Jose void devm_cxl_memdev_edac_release(struct cxl_memdev *cxlmd)
20930b5ccb0dSShiju Jose {
20940b5ccb0dSShiju Jose struct cxl_mem_err_rec *array_rec = cxlmd->err_rec_array;
20950b5ccb0dSShiju Jose struct cxl_event_gen_media *rec_gen_media;
20960b5ccb0dSShiju Jose struct cxl_event_dram *rec_dram;
20970b5ccb0dSShiju Jose unsigned long index;
20980b5ccb0dSShiju Jose
20990b5ccb0dSShiju Jose if (!IS_ENABLED(CONFIG_CXL_EDAC_MEM_REPAIR) || !array_rec)
21000b5ccb0dSShiju Jose return;
21010b5ccb0dSShiju Jose
21020b5ccb0dSShiju Jose xa_for_each(&array_rec->rec_dram, index, rec_dram)
21030b5ccb0dSShiju Jose kfree(rec_dram);
21040b5ccb0dSShiju Jose xa_destroy(&array_rec->rec_dram);
21050b5ccb0dSShiju Jose
21060b5ccb0dSShiju Jose xa_for_each(&array_rec->rec_gen_media, index, rec_gen_media)
21070b5ccb0dSShiju Jose kfree(rec_gen_media);
21080b5ccb0dSShiju Jose xa_destroy(&array_rec->rec_gen_media);
21090b5ccb0dSShiju Jose }
21100b5ccb0dSShiju Jose EXPORT_SYMBOL_NS_GPL(devm_cxl_memdev_edac_release, "CXL");