Lines Matching +full:channel +full:- +full:3
1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright (c) 2018-2021 Intel Corporation
7 #include <linux/devm-helpers.h>
12 #include <linux/peci-cpu.h>
20 /* Max number of channel ranks and DIMM index per channel */
22 #define DIMM_IDX_MAX_ON_HSX 3
24 #define DIMM_IDX_MAX_ON_BDX 3
95 int dimm_order = dimm_no % priv->gen_info->dimm_idx_max; in get_dimm_temp()
96 int chan_rank = dimm_no / priv->gen_info->dimm_idx_max; in get_dimm_temp()
100 mutex_lock(&priv->dimm[dimm_no].temp.state.lock); in get_dimm_temp()
101 if (!peci_sensor_need_update(&priv->dimm[dimm_no].temp.state)) in get_dimm_temp()
104 ret = peci_pcs_read(priv->peci_dev, PECI_PCS_DDR_DIMM_TEMP, chan_rank, &data); in get_dimm_temp()
108 priv->dimm[dimm_no].temp.value = __dimm_temp(data, dimm_order) * MILLIDEGREE_PER_DEGREE; in get_dimm_temp()
110 peci_sensor_mark_updated(&priv->dimm[dimm_no].temp.state); in get_dimm_temp()
113 *val = priv->dimm[dimm_no].temp.value; in get_dimm_temp()
115 mutex_unlock(&priv->dimm[dimm_no].temp.state.lock); in get_dimm_temp()
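get_dimm_temp() above splits the flat DIMM number into a channel rank plus a slot within that rank, then scales the raw PCS byte to millidegrees. The __dimm_temp() helper it calls is not among the matched lines; a minimal sketch, assuming the usual one-byte-per-slot layout of the PCS dword:

static inline int __dimm_temp(u32 pcs, int dimm_order)
{
	/* assumed: one temperature byte per DIMM slot in the PCS dword */
	return (pcs >> (dimm_order * 8)) & 0xff;
}

For example, with dimm_idx_max = 3 (HSX), dimm_no 7 decomposes into chan_rank 2 and dimm_order 1, and the extracted byte is multiplied by MILLIDEGREE_PER_DEGREE (1000) for hwmon.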
121 int dimm_order = dimm_no % priv->gen_info->dimm_idx_max; in update_thresholds()
122 int chan_rank = dimm_no / priv->gen_info->dimm_idx_max; in update_thresholds()
126 if (!peci_sensor_need_update(&priv->dimm[dimm_no].thresholds.state)) in update_thresholds()
129 ret = priv->gen_info->read_thresholds(priv, dimm_order, chan_rank, &data); in update_thresholds()
130 if (ret == -ENODATA) /* Use default or previous value */ in update_thresholds()
135 priv->dimm[dimm_no].thresholds.temp_max = GET_TEMP_MAX(data) * MILLIDEGREE_PER_DEGREE; in update_thresholds()
136 priv->dimm[dimm_no].thresholds.temp_crit = GET_TEMP_CRIT(data) * MILLIDEGREE_PER_DEGREE; in update_thresholds()
138 peci_sensor_mark_updated(&priv->dimm[dimm_no].thresholds.state); in update_thresholds()
148 mutex_lock(&priv->dimm[dimm_no].thresholds.state.lock); in get_dimm_thresholds()
155 *val = priv->dimm[dimm_no].thresholds.temp_max; in get_dimm_thresholds()
158 *val = priv->dimm[dimm_no].thresholds.temp_crit; in get_dimm_thresholds()
161 ret = -EOPNOTSUPP; in get_dimm_thresholds()
165 mutex_unlock(&priv->dimm[dimm_no].thresholds.state.lock); in get_dimm_thresholds()
172 u32 attr, int channel, const char **str) in dimmtemp_read_string() argument
177 return -EOPNOTSUPP; in dimmtemp_read_string()
179 *str = (const char *)priv->dimmtemp_label[channel]; in dimmtemp_read_string()
185 u32 attr, int channel, long *val) in dimmtemp_read() argument
191 return get_dimm_temp(priv, channel, val); in dimmtemp_read()
193 return get_dimm_thresholds(priv, temp_max_type, channel, val); in dimmtemp_read()
195 return get_dimm_thresholds(priv, temp_crit_type, channel, val); in dimmtemp_read()
200 return -EOPNOTSUPP; in dimmtemp_read()
204 u32 attr, int channel) in dimmtemp_is_visible() argument
208 if (test_bit(channel, priv->dimm_mask)) in dimmtemp_is_visible()
222 int chan_rank_max = priv->gen_info->chan_rank_max; in check_populated_dimms()
223 int dimm_idx_max = priv->gen_info->dimm_idx_max; in check_populated_dimms()
231 WARN_ONCE(1, "Unsupported number of DIMMs - chan_rank_max: %d, dimm_idx_max: %d", in check_populated_dimms()
233 return -EINVAL; in check_populated_dimms()
240 ret = peci_pcs_read(priv->peci_dev, PECI_PCS_DDR_DIMM_TEMP, chan_rank, &pcs); in check_populated_dimms()
243 * Overall, we expect either success or -EINVAL in in check_populated_dimms()
248 if (ret == -EINVAL) { in check_populated_dimms()
253 return -EAGAIN; in check_populated_dimms()
262 * If we got all -EINVALs, it means that the CPU doesn't have any in check_populated_dimms()
268 if (priv->no_dimm_retry_count < NO_DIMM_RETRY_COUNT_MAX) { in check_populated_dimms()
269 priv->no_dimm_retry_count++; in check_populated_dimms()
271 return -EAGAIN; in check_populated_dimms()
274 return -ENODEV; in check_populated_dimms()
282 priv->no_dimm_retry_count = 0; in check_populated_dimms()
283 return -EAGAIN; in check_populated_dimms()
287 dev_dbg(priv->dev, "Found DIMM%#x\n", i); in check_populated_dimms()
290 bitmap_copy(priv->dimm_mask, dimm_mask, DIMM_NUMS_MAX); in check_populated_dimms()
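check_populated_dimms() reads one PCS dword per channel rank; the matched lines omit how individual bits of dimm_mask get set. A plausible sketch, assuming a slot counts as populated when its temperature byte is non-zero:

for (i = 0; i < dimm_idx_max; i++)
	if (__dimm_temp(pcs, i))
		set_bit(chan_rank * dimm_idx_max + i, dimm_mask);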
297 int rank = chan / priv->gen_info->dimm_idx_max; in create_dimm_temp_label()
298 int idx = chan % priv->gen_info->dimm_idx_max; in create_dimm_temp_label()
300 priv->dimmtemp_label[chan] = devm_kasprintf(priv->dev, GFP_KERNEL, in create_dimm_temp_label()
303 if (!priv->dimmtemp_label[chan]) in create_dimm_temp_label()
304 return -ENOMEM; in create_dimm_temp_label()
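The devm_kasprintf() format string is cut off by the match. Assuming the conventional naming where the rank becomes a letter and the slot a 1-based index, the label would be built roughly as:

priv->dimmtemp_label[chan] = devm_kasprintf(priv->dev, GFP_KERNEL,
					    "DIMM %c%d", 'A' + rank, idx + 1);
/* assumed format, e.g. chan 4 with dimm_idx_max = 3 -> "DIMM B2" */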
311 [0 ... DIMM_NUMS_MAX - 1] = HWMON_T_LABEL |
330 * allows checking DIMM state - causing us to retry later on. in create_dimm_temp_info()
333 if (ret == -ENODEV) { in create_dimm_temp_info()
334 dev_dbg(priv->dev, "No DIMMs found\n"); in create_dimm_temp_info()
337 schedule_delayed_work(&priv->detect_work, DIMM_MASK_CHECK_DELAY_JIFFIES); in create_dimm_temp_info()
338 dev_dbg(priv->dev, "Deferred populating DIMM temp info\n"); in create_dimm_temp_info()
342 channels = priv->gen_info->chan_rank_max * priv->gen_info->dimm_idx_max; in create_dimm_temp_info()
344 priv->dimmtemp_label = devm_kzalloc(priv->dev, channels * sizeof(char *), GFP_KERNEL); in create_dimm_temp_info()
345 if (!priv->dimmtemp_label) in create_dimm_temp_info()
346 return -ENOMEM; in create_dimm_temp_info()
348 for_each_set_bit(i, priv->dimm_mask, DIMM_NUMS_MAX) { in create_dimm_temp_info()
352 mutex_init(&priv->dimm[i].thresholds.state.lock); in create_dimm_temp_info()
353 mutex_init(&priv->dimm[i].temp.state.lock); in create_dimm_temp_info()
356 dev = devm_hwmon_device_register_with_info(priv->dev, priv->name, priv, in create_dimm_temp_info()
359 dev_err(priv->dev, "Failed to register hwmon device\n"); in create_dimm_temp_info()
363 dev_dbg(priv->dev, "%s: sensor '%s'\n", dev_name(dev), priv->name); in create_dimm_temp_info()
376 if (ret && ret != -EAGAIN) in create_dimm_temp_info_delayed()
377 dev_err(priv->dev, "Failed to populate DIMM temp info\n"); in create_dimm_temp_info_delayed()
382 struct device *dev = &adev->dev; in peci_dimmtemp_probe()
383 struct peci_device *peci_dev = to_peci_device(dev->parent); in peci_dimmtemp_probe()
389 return -ENOMEM; in peci_dimmtemp_probe()
391 priv->name = devm_kasprintf(dev, GFP_KERNEL, "peci_dimmtemp.cpu%d", in peci_dimmtemp_probe()
392 peci_dev->info.socket_id); in peci_dimmtemp_probe()
393 if (!priv->name) in peci_dimmtemp_probe()
394 return -ENOMEM; in peci_dimmtemp_probe()
396 priv->dev = dev; in peci_dimmtemp_probe()
397 priv->peci_dev = peci_dev; in peci_dimmtemp_probe()
398 priv->gen_info = (const struct dimm_info *)id->driver_data; in peci_dimmtemp_probe()
405 if (peci_dev->info.peci_revision < priv->gen_info->min_peci_revision) in peci_dimmtemp_probe()
406 dev_warn(priv->dev, in peci_dimmtemp_probe()
408 peci_dev->info.peci_revision); in peci_dimmtemp_probe()
410 ret = devm_delayed_work_autocancel(priv->dev, &priv->detect_work, in peci_dimmtemp_probe()
416 if (ret && ret != -EAGAIN) { in peci_dimmtemp_probe()
432 * Device 20, Function 0: IMC 0 channel 0 -> rank 0 in read_thresholds_hsx()
433 * Device 20, Function 1: IMC 0 channel 1 -> rank 1 in read_thresholds_hsx()
434 * Device 21, Function 0: IMC 0 channel 2 -> rank 2 in read_thresholds_hsx()
435 * Device 21, Function 1: IMC 0 channel 3 -> rank 3 in read_thresholds_hsx()
436 * Device 23, Function 0: IMC 1 channel 0 -> rank 4 in read_thresholds_hsx()
437 * Device 23, Function 1: IMC 1 channel 1 -> rank 5 in read_thresholds_hsx()
438 * Device 24, Function 0: IMC 1 channel 2 -> rank 6 in read_thresholds_hsx()
439 * Device 24, Function 1: IMC 1 channel 3 -> rank 7 in read_thresholds_hsx()
445 ret = peci_pci_local_read(priv->peci_dev, 1, dev, func, reg, data); in read_thresholds_hsx()
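Only the mapping comment and the config-space read are matched for Haswell; the device/function/register arithmetic is elided. One encoding consistent with the table above (the 0x120 register base and 4-byte per-DIMM stride are assumptions, not shown here):

dev  = 20 + chan_rank / 2 + chan_rank / 4;	/* 20, 20, 21, 21, 23, 23, 24, 24 */
func = chan_rank % 2;
reg  = 0x120 + dimm_order * 4;			/* assumed per-DIMM stride */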
460 * Device 10, Function 2: IMC 0 channel 0 -> rank 0 in read_thresholds_bdxd()
461 * Device 10, Function 6: IMC 0 channel 1 -> rank 1 in read_thresholds_bdxd()
462 * Device 12, Function 2: IMC 1 channel 0 -> rank 2 in read_thresholds_bdxd()
463 * Device 12, Function 6: IMC 1 channel 1 -> rank 3 in read_thresholds_bdxd()
469 ret = peci_pci_local_read(priv->peci_dev, 2, dev, func, reg, data); in read_thresholds_bdxd()
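Broadwell-DE is in the same situation: only the comment and the read survive the match. A sketch consistent with its table (register base again assumed):

dev  = 10 + chan_rank / 2 * 2;		/* 10, 10, 12, 12 */
func = (chan_rank % 2) ? 6 : 2;
reg  = 0x120 + dimm_order * 4;		/* assumed */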
484 * Device 10, Function 2: IMC 0 channel 0 -> rank 0 in read_thresholds_skx()
485 * Device 10, Function 6: IMC 0 channel 1 -> rank 1 in read_thresholds_skx()
486 * Device 11, Function 2: IMC 0 channel 2 -> rank 2 in read_thresholds_skx()
487 * Device 12, Function 2: IMC 1 channel 0 -> rank 3 in read_thresholds_skx()
488 * Device 12, Function 6: IMC 1 channel 1 -> rank 4 in read_thresholds_skx()
489 * Device 13, Function 2: IMC 1 channel 2 -> rank 5 in read_thresholds_skx()
491 dev = 10 + chan_rank / 3 * 2 + (chan_rank % 3 == 2 ? 1 : 0); in read_thresholds_skx()
492 func = chan_rank % 3 == 1 ? 6 : 2; in read_thresholds_skx()
495 ret = peci_pci_local_read(priv->peci_dev, 2, dev, func, reg, data); in read_thresholds_skx()
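A quick check of the Skylake formula against its table: chan_rank 5 gives dev = 10 + 1 * 2 + 1 = 13 and func = 2, matching the "IMC 1 channel 2 -> rank 5" row. The register offset presumably follows the same per-DIMM stride as the older generations, though it is not part of the matched lines.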
510 ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd4, &reg_val); in read_thresholds_icx()
512 return -ENODATA; /* Use default or previous value */ in read_thresholds_icx()
514 ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd0, &reg_val); in read_thresholds_icx()
516 return -ENODATA; /* Use default or previous value */ in read_thresholds_icx()
519 * Device 26, Offset 224e0: IMC 0 channel 0 -> rank 0 in read_thresholds_icx()
520 * Device 26, Offset 264e0: IMC 0 channel 1 -> rank 1 in read_thresholds_icx()
521 * Device 27, Offset 224e0: IMC 1 channel 0 -> rank 2 in read_thresholds_icx()
522 * Device 27, Offset 264e0: IMC 1 channel 1 -> rank 3 in read_thresholds_icx()
523 * Device 28, Offset 224e0: IMC 2 channel 0 -> rank 4 in read_thresholds_icx()
524 * Device 28, Offset 264e0: IMC 2 channel 1 -> rank 5 in read_thresholds_icx()
525 * Device 29, Offset 224e0: IMC 3 channel 0 -> rank 6 in read_thresholds_icx()
526 * Device 29, Offset 264e0: IMC 3 channel 1 -> rank 7 in read_thresholds_icx()
531 ret = peci_mmio_read(priv->peci_dev, 0, GET_CPU_SEG(reg_val), GET_CPU_BUS(reg_val), in read_thresholds_icx()
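For Ice Lake, the two endpoint config reads at 0xd4/0xd0 supply the value that GET_CPU_SEG()/GET_CPU_BUS() decode for the MMIO access, while the device/offset arithmetic itself is elided. A sketch consistent with the table (the 0x4000 channel stride follows from 0x264e0 - 0x224e0; the 4-byte per-DIMM stride is an assumption):

dev    = 26 + chan_rank / 2;	/* 26..29 */
offset = 0x224e0 + dimm_order * 4 + (chan_rank % 2) * 0x4000;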
547 ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd4, &reg_val); in read_thresholds_spr()
549 return -ENODATA; /* Use default or previous value */ in read_thresholds_spr()
551 ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd0, &reg_val); in read_thresholds_spr()
553 return -ENODATA; /* Use default or previous value */ in read_thresholds_spr()
556 * Device 26, Offset 219a8: IMC 0 channel 0 -> rank 0 in read_thresholds_spr()
557 * Device 26, Offset 299a8: IMC 0 channel 1 -> rank 1 in read_thresholds_spr()
558 * Device 27, Offset 219a8: IMC 1 channel 0 -> rank 2 in read_thresholds_spr()
559 * Device 27, Offset 299a8: IMC 1 channel 1 -> rank 3 in read_thresholds_spr()
560 * Device 28, Offset 219a8: IMC 2 channel 0 -> rank 4 in read_thresholds_spr()
561 * Device 28, Offset 299a8: IMC 2 channel 1 -> rank 5 in read_thresholds_spr()
562 * Device 29, Offset 219a8: IMC 3 channel 0 -> rank 6 in read_thresholds_spr()
563 * Device 29, Offset 299a8: IMC 3 channel 1 -> rank 7 in read_thresholds_spr()
568 ret = peci_mmio_read(priv->peci_dev, 0, GET_CPU_SEG(reg_val), GET_CPU_BUS(reg_val), in read_thresholds_spr()
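Sapphire Rapids follows the same pattern with a different register base. A sketch consistent with its table (channel stride 0x8000 = 0x299a8 - 0x219a8; per-DIMM stride assumed):

dev    = 26 + chan_rank / 2;	/* 26..29 */
offset = 0x219a8 + dimm_order * 4 + (chan_rank % 2) * 0x8000;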