xref: /linux/drivers/cxl/core/core.h (revision ca220141fa8ebae09765a242076b2b77338106b0)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Copyright(c) 2020 Intel Corporation. */
3 
4 #ifndef __CXL_CORE_H__
5 #define __CXL_CORE_H__
6 
7 #include <cxl/mailbox.h>
8 #include <linux/rwsem.h>
9 
10 extern const struct device_type cxl_nvdimm_bridge_type;
11 extern const struct device_type cxl_nvdimm_type;
12 extern const struct device_type cxl_pmu_type;
13 
14 extern struct attribute_group cxl_base_attribute_group;
15 
/*
 * Mode argument for cxl_decoder_detach():
 * @DETACH_ONLY: detach the endpoint decoder from its region
 * @DETACH_INVALIDATE: detach and additionally invalidate related state
 *	(NOTE(review): exact invalidation semantics are defined by the
 *	cxl_decoder_detach() implementation in the region code — confirm there)
 */
enum cxl_detach_mode {
	DETACH_ONLY,
	DETACH_INVALIDATE,
};
20 
21 #ifdef CONFIG_CXL_REGION
22 
/*
 * Snapshot of the parameters tying an endpoint decoder to a region:
 * the decoder itself, its host physical address range, and the
 * interleave geometry.  (NOTE(review): no user is visible in this
 * header — confirm lifetime/ownership against the region code.)
 */
struct cxl_region_context {
	struct cxl_endpoint_decoder *cxled;	/* endpoint decoder being described */
	struct range hpa_range;			/* HPA span covered by the decoder */
	int interleave_ways;
	int interleave_granularity;
};
29 
30 extern struct device_attribute dev_attr_create_pmem_region;
31 extern struct device_attribute dev_attr_create_ram_region;
32 extern struct device_attribute dev_attr_delete_region;
33 extern struct device_attribute dev_attr_region;
34 extern const struct device_type cxl_pmem_region_type;
35 extern const struct device_type cxl_dax_region_type;
36 extern const struct device_type cxl_region_type;
37 
38 int cxl_decoder_detach(struct cxl_region *cxlr,
39 		       struct cxl_endpoint_decoder *cxled, int pos,
40 		       enum cxl_detach_mode mode);
41 
42 #define CXL_REGION_ATTR(x) (&dev_attr_##x.attr)
43 #define CXL_REGION_TYPE(x) (&cxl_region_type)
44 #define SET_CXL_REGION_ATTR(x) (&dev_attr_##x.attr),
45 #define CXL_PMEM_REGION_TYPE(x) (&cxl_pmem_region_type)
46 #define CXL_DAX_REGION_TYPE(x) (&cxl_dax_region_type)
47 int cxl_region_init(void);
48 void cxl_region_exit(void);
49 int cxl_get_poison_by_endpoint(struct cxl_port *port);
50 struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa);
51 u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
52 		   u64 dpa);
53 
54 #else
/*
 * Stubs for !CONFIG_CXL_REGION builds: region support is compiled out,
 * so each helper degenerates to a benign "nothing to do / not found"
 * result for callers that are built unconditionally.
 */
static inline u64 cxl_dpa_to_hpa(struct cxl_region *cxlr,
				 const struct cxl_memdev *cxlmd, u64 dpa)
{
	/* No regions exist, so no DPA translates to an HPA */
	return ULLONG_MAX;
}
static inline
struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
{
	/* No region can own any DPA when region support is disabled */
	return NULL;
}
static inline int cxl_get_poison_by_endpoint(struct cxl_port *port)
{
	return 0;
}
static inline int cxl_decoder_detach(struct cxl_region *cxlr,
				     struct cxl_endpoint_decoder *cxled,
				     int pos, enum cxl_detach_mode mode)
{
	return 0;
}
static inline int cxl_region_init(void)
{
	return 0;
}
static inline void cxl_region_exit(void)
{
}
82 #define CXL_REGION_ATTR(x) NULL
83 #define CXL_REGION_TYPE(x) NULL
84 #define SET_CXL_REGION_ATTR(x)
85 #define CXL_PMEM_REGION_TYPE(x) NULL
86 #define CXL_DAX_REGION_TYPE(x) NULL
87 #endif
88 
89 struct cxl_send_command;
90 struct cxl_mem_query_commands;
91 int cxl_query_cmd(struct cxl_mailbox *cxl_mbox,
92 		  struct cxl_mem_query_commands __user *q);
93 int cxl_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_send_command __user *s);
94 void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
95 				   resource_size_t length);
96 
97 struct dentry *cxl_debugfs_create_dir(const char *dir);
98 int cxl_dpa_set_part(struct cxl_endpoint_decoder *cxled,
99 		     enum cxl_partition_mode mode);
100 int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, u64 size);
101 int cxl_dpa_free(struct cxl_endpoint_decoder *cxled);
102 resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled);
103 resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled);
104 bool cxl_resource_contains_addr(const struct resource *res, const resource_size_t addr);
105 
/*
 * Selects which half of a Root Complex Register Block a lookup targets;
 * passed as the @which argument of __rcrb_to_component() below.
 */
enum cxl_rcrb {
	CXL_RCRB_DOWNSTREAM,
	CXL_RCRB_UPSTREAM,
};
110 struct cxl_rcrb_info;
111 resource_size_t __rcrb_to_component(struct device *dev,
112 				    struct cxl_rcrb_info *ri,
113 				    enum cxl_rcrb which);
114 u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb);
115 
116 #define PCI_RCRB_CAP_LIST_ID_MASK	GENMASK(7, 0)
117 #define PCI_RCRB_CAP_HDR_ID_MASK	GENMASK(7, 0)
118 #define PCI_RCRB_CAP_HDR_NEXT_MASK	GENMASK(15, 8)
119 #define PCI_CAP_EXP_SIZEOF		0x3c
120 
/*
 * Bundle of the rw_semaphores guarding CXL address-space configuration.
 * A single subsystem-wide instance is exported as 'cxl_rwsem' below.
 */
struct cxl_rwsem {
	/*
	 * All changes to HPA (interleave configuration) occur with this
	 * lock held for write.
	 */
	struct rw_semaphore region;
	/*
	 * All changes to a device DPA space occur with this lock held
	 * for write.
	 */
	struct rw_semaphore dpa;
};
133 
134 extern struct cxl_rwsem cxl_rwsem;
135 
136 int cxl_memdev_init(void);
137 void cxl_memdev_exit(void);
138 void cxl_mbox_init(void);
139 
/*
 * Discriminates which poison operation generated a trace event:
 * listing, injecting, or clearing poison.
 */
enum cxl_poison_trace_type {
	CXL_POISON_TRACE_LIST,
	CXL_POISON_TRACE_INJECT,
	CXL_POISON_TRACE_CLEAR,
};
145 
146 enum poison_cmd_enabled_bits;
147 bool cxl_memdev_has_poison_cmd(struct cxl_memdev *cxlmd,
148 			       enum poison_cmd_enabled_bits cmd);
149 
150 long cxl_pci_get_latency(struct pci_dev *pdev);
151 int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c);
152 int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
153 					struct access_coordinate *c);
154 
155 static inline struct device *dport_to_host(struct cxl_dport *dport)
156 {
157 	struct cxl_port *port = dport->port;
158 
159 	if (is_cxl_root(port))
160 		return port->uport_dev;
161 	return &port->dev;
162 }
163 #ifdef CONFIG_CXL_RAS
164 int cxl_ras_init(void);
165 void cxl_ras_exit(void);
166 bool cxl_handle_ras(struct device *dev, void __iomem *ras_base);
167 void cxl_handle_cor_ras(struct device *dev, void __iomem *ras_base);
168 void cxl_dport_map_rch_aer(struct cxl_dport *dport);
169 void cxl_disable_rch_root_ints(struct cxl_dport *dport);
170 void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds);
171 void devm_cxl_dport_ras_setup(struct cxl_dport *dport);
172 #else
/*
 * Stubs for !CONFIG_CXL_RAS builds: RAS handling is compiled out, so
 * init trivially succeeds and every handler is a no-op.
 */
static inline int cxl_ras_init(void)
{
	return 0;
}
static inline void cxl_ras_exit(void) { }
static inline bool cxl_handle_ras(struct device *dev, void __iomem *ras_base)
{
	/* false presumably means "no error handled" — confirm against the
	 * CONFIG_CXL_RAS implementation's callers */
	return false;
}
static inline void cxl_handle_cor_ras(struct device *dev, void __iomem *ras_base) { }
static inline void cxl_dport_map_rch_aer(struct cxl_dport *dport) { }
static inline void cxl_disable_rch_root_ints(struct cxl_dport *dport) { }
static inline void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds) { }
static inline void devm_cxl_dport_ras_setup(struct cxl_dport *dport) { }
187 #endif /* CONFIG_CXL_RAS */
188 
189 int cxl_gpf_port_setup(struct cxl_dport *dport);
190 
191 struct cxl_hdm;
192 int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
193 			struct cxl_endpoint_dvsec_info *info);
194 int cxl_port_get_possible_dports(struct cxl_port *port);
195 
196 #ifdef CONFIG_CXL_FEATURES
197 struct cxl_feat_entry *
198 cxl_feature_info(struct cxl_features_state *cxlfs, const uuid_t *uuid);
199 size_t cxl_get_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
200 		       enum cxl_get_feat_selection selection,
201 		       void *feat_out, size_t feat_out_size, u16 offset,
202 		       u16 *return_code);
203 int cxl_set_feature(struct cxl_mailbox *cxl_mbox, const uuid_t *feat_uuid,
204 		    u8 feat_version, const void *feat_data,
205 		    size_t feat_data_size, u32 feat_flag, u16 offset,
206 		    u16 *return_code);
207 #endif
208 
209 #endif /* __CXL_CORE_H__ */
210