region.c: diff of revision 285f2a08841432fc3e498b1cd00cce5216cdf189 (old) against 84328c5acebc10c8cdcf17283ab6c6d548885bfc (new)

Unchanged context (identical in both revisions):

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/memregion.h>
#include <linux/genalloc.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/slab.h>

--- 1087 unchanged lines hidden ---

                nr_targets_inc = true;

                rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr);
                if (rc)
                        goto out_erase;
        }
        cxld = cxl_rr->decoder;

Added in the new revision, in cxl_port_attach_region(): reject a configuration whose target count at this port exceeds what the switch decoder can address.

        /*
         * the number of targets should not exceed the target_count
         * of the decoder
         */
        if (is_switch_decoder(&cxld->dev)) {
                struct cxl_switch_decoder *cxlsd;

                cxlsd = to_cxl_switch_decoder(&cxld->dev);
                if (cxl_rr->nr_targets > cxlsd->nr_targets) {
                        dev_dbg(&cxlr->dev,
                                "%s:%s %s add: %s:%s @ %d overflows targets: %d\n",
                                dev_name(port->uport_dev), dev_name(&port->dev),
                                dev_name(&cxld->dev), dev_name(&cxlmd->dev),
                                dev_name(&cxled->cxld.dev), pos,
                                cxlsd->nr_targets);
                        rc = -ENXIO;
                        goto out_erase;
                }
        }
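
The idea behind the check is that every distinct downstream port the region's endpoints route through at this switch consumes one entry in the switch decoder's target list, and that list has a fixed hardware size (cxlsd->nr_targets). Below is a minimal user-space sketch of that bookkeeping; the 2-entry limit, the struct layout, and the function names are illustrative assumptions, not the kernel's data structures.

#include <stdbool.h>
#include <stdio.h>

#define TARGET_LIST_SIZE 2      /* hypothetical switch decoder target count */

struct region_ref {
        int nr_targets;
        const char *targets[TARGET_LIST_SIZE];
};

/* Track a downstream port; fail once the decoder's target list is full. */
static bool track_target(struct region_ref *rr, const char *dport)
{
        for (int i = 0; i < rr->nr_targets; i++)
                if (rr->targets[i] == dport)
                        return true;            /* already counted */
        if (rr->nr_targets == TARGET_LIST_SIZE)
                return false;                   /* would overflow the decoder */
        rr->targets[rr->nr_targets++] = dport;
        return true;
}

int main(void)
{
        struct region_ref rr = { 0 };
        const char *dports[] = { "dport0", "dport1", "dport2" };

        for (int i = 0; i < 3; i++)
                printf("%s: %s\n", dports[i],
                       track_target(&rr, dports[i]) ? "tracked" : "overflows targets");
        return 0;
}

With only two entries available, the third distinct downstream port is rejected, which mirrors the -ENXIO path above.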

Unchanged context (identical in both revisions):

        rc = cxl_rr_ep_add(cxl_rr, cxled);
        if (rc) {
                dev_dbg(&cxlr->dev,
                        "%s: failed to track endpoint %s:%s reference\n",
                        dev_name(&port->dev), dev_name(&cxlmd->dev),
                        dev_name(&cxld->dev));
                goto out_erase;
        }

--- 93 unchanged lines hidden ---

                        dev_name(&cxlmd_peer->dev),
                        dev_name(&cxled_peer->cxld.dev));
                return -ENXIO;
        }

        return 0;
}

Added in the new revision: a new helper, check_interleave_cap(), that validates the requested interleave ways and granularity against the port's HDM decoder capabilities.

static int check_interleave_cap(struct cxl_decoder *cxld, int iw, int ig)
{
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);
        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
        unsigned int interleave_mask;
        u8 eiw;
        u16 eig;
        int high_pos, low_pos;

        if (!test_bit(iw, &cxlhdm->iw_cap_mask))
                return -ENXIO;
        /*
         * Per CXL specification r3.1(8.2.4.20.13 Decoder Protection),
         * if eiw < 8:
         *   DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + 8 + eiw]
         *   DPAOFFSET[eig + 7: 0]  = HPAOFFSET[eig + 7: 0]
         *
         *   when the eiw is 0, all the bits of HPAOFFSET[51: 0] are used, the
         *   interleave bits are none.
         *
         * if eiw >= 8:
         *   DPAOFFSET[51: eig + 8] = HPAOFFSET[51: eig + eiw] / 3
         *   DPAOFFSET[eig + 7: 0]  = HPAOFFSET[eig + 7: 0]
         *
         *   when the eiw is 8, all the bits of HPAOFFSET[51: 0] are used, the
         *   interleave bits are none.
         */
        ways_to_eiw(iw, &eiw);
        if (eiw == 0 || eiw == 8)
                return 0;

        granularity_to_eig(ig, &eig);
        if (eiw > 8)
                high_pos = eiw + eig - 1;
        else
                high_pos = eiw + eig + 7;
        low_pos = eig + 8;
        interleave_mask = GENMASK(high_pos, low_pos);
        if (interleave_mask & ~cxlhdm->interleave_mask)
                return -ENXIO;

        return 0;
}
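
The spec formulas in that comment boil down to one contiguous field of host-physical-address bits, HPA[eig + 8 + n - 1 : eig + 8] with n the power-of-two component of the interleave, that the decoder must be able to use for target selection. The stand-alone user-space sketch below (not kernel code) reproduces that arithmetic for a few ways/granularity combinations; ways_to_eiw() and granularity_to_eig() here are simplified reimplementations of the kernel's encoders, GENMASK_U64() stands in for the kernel's GENMASK(), and the sketch omits the separate iw_cap_mask check the kernel performs first.

#include <stdint.h>
#include <stdio.h>

#define GENMASK_U64(h, l) ((~0ULL >> (63 - (h))) & ~((1ULL << (l)) - 1ULL))

/* CXL encoded interleave ways: 1,2,4,8,16 -> 0..4; 3,6,12 -> 8..10 */
static int ways_to_eiw(unsigned int iw, uint8_t *eiw)
{
        switch (iw) {
        case 1:  *eiw = 0;  return 0;
        case 2:  *eiw = 1;  return 0;
        case 4:  *eiw = 2;  return 0;
        case 8:  *eiw = 3;  return 0;
        case 16: *eiw = 4;  return 0;
        case 3:  *eiw = 8;  return 0;
        case 6:  *eiw = 9;  return 0;
        case 12: *eiw = 10; return 0;
        default: return -1;
        }
}

/* CXL encoded interleave granularity: ig = 256 << eig, eig in 0..6 */
static int granularity_to_eig(unsigned int ig, uint16_t *eig)
{
        unsigned int g = 256;

        for (*eig = 0; *eig <= 6; (*eig)++, g <<= 1)
                if (g == ig)
                        return 0;
        return -1;
}

/* HPA bits a decoder must use for target selection at iw ways, ig bytes */
static uint64_t interleave_bits(unsigned int iw, unsigned int ig)
{
        uint8_t eiw;
        uint16_t eig;
        int high_pos, low_pos;

        if (ways_to_eiw(iw, &eiw) || granularity_to_eig(ig, &eig))
                return 0;
        if (eiw == 0 || eiw == 8)       /* 1-way, or pure 3-way: no direct bits */
                return 0;
        if (eiw > 8)                    /* 6- or 12-way: only the power-of-2 part */
                high_pos = eiw + eig - 1;
        else
                high_pos = eiw + eig + 7;
        low_pos = eig + 8;
        return GENMASK_U64(high_pos, low_pos);
}

int main(void)
{
        unsigned int configs[][2] = { { 4, 1024 }, { 8, 256 }, { 6, 256 }, { 3, 256 } };

        for (int i = 0; i < 4; i++)
                printf("iw=%-2u ig=%-4u -> interleave mask 0x%llx\n",
                       configs[i][0], configs[i][1],
                       (unsigned long long)interleave_bits(configs[i][0], configs[i][1]));
        return 0;
}

For example, 4-way at 1024 bytes needs HPA[11:10], 8-way at 256 bytes needs HPA[10:8], 6-way at 256 bytes needs only HPA[8], and a pure 3-way interleave needs no dedicated address bits, which is why check_interleave_cap() returns early for eiw == 0 or eiw == 8.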

Unchanged context (identical in both revisions):

static int cxl_port_setup_targets(struct cxl_port *port,
                                  struct cxl_region *cxlr,
                                  struct cxl_endpoint_decoder *cxled)
{
        struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
        int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
        struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
        struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);

--- 134 unchanged lines hidden ---

                                cxld->interleave_granularity,
                                (cxld->flags & CXL_DECODER_F_ENABLE) ?
                                        "enabled" :
                                        "disabled",
                                cxld->hpa_range.start, cxld->hpa_range.end);
                        return -ENXIO;
                }
        } else {

Added in the new revision, in the non-autodiscovery branch of cxl_port_setup_targets(): bail out if this port's decoder cannot support the computed interleave ways/granularity.

                rc = check_interleave_cap(cxld, iw, ig);
                if (rc) {
                        dev_dbg(&cxlr->dev,
                                "%s:%s iw: %d ig: %d is not supported\n",
                                dev_name(port->uport_dev),
                                dev_name(&port->dev), iw, ig);
                        return rc;
                }

Unchanged context (identical in both revisions):

                cxld->interleave_ways = iw;
                cxld->interleave_granularity = ig;
                cxld->hpa_range = (struct range) {
                        .start = p->res->start,
                        .end = p->res->end,
                };
        }
        dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport_dev),

--- 420 unchanged lines hidden ---

{
        struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_region_params *p = &cxlr->params;
        struct cxl_port *ep_port, *root_port;
        struct cxl_dport *dport;
        int rc = -ENXIO;

Added in the new revision, at the top of cxl_region_attach(): reject an endpoint decoder that cannot support the region's interleave configuration before any attach work is done.

        rc = check_interleave_cap(&cxled->cxld, p->interleave_ways,
                                  p->interleave_granularity);
        if (rc) {
                dev_dbg(&cxlr->dev, "%s iw: %d ig: %d is not supported\n",
                        dev_name(&cxled->cxld.dev), p->interleave_ways,
                        p->interleave_granularity);
                return rc;
        }

Unchanged context (identical in both revisions):

        if (cxled->mode != cxlr->mode) {
                dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
                        dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
                return -EINVAL;
        }

        if (cxled->mode == CXL_DECODER_DEAD) {
                dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));

--- 1538 unchanged lines hidden ---