xref: /linux/arch/s390/pci/pci_clp.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>

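/*
 * Dump a CLP response code / return code pair to the zpci error log
 * (via zpci_err_hex) so that failed CLP requests can be inspected later.
 */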
static inline void zpci_err_clp(unsigned int rsp, int rc)
{
	struct {
		unsigned int rsp;
		int rc;
	} __packed data = {rsp, rc};

	zpci_err_hex(&data, sizeof(data));
}

/*
 * Call Logical Processor
 * Retry logic is handled by the caller.
 */
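/*
 * There is no assembler mnemonic for CLP, so the instruction is emitted
 * via .insn: 0xb9a0 is the CLP opcode in RRF format, and the resulting
 * condition code is extracted from the program mask with ipm/srl.
 */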
static inline u8 clp_instr(void *data)
{
	struct { u8 _[CLP_BLK_SIZE]; } *req = data;
	u64 ignored;
	u8 cc;

	asm volatile (
		"	.insn	rrf,0xb9a00000,%[ign],%[req],0x0,0x2\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc), [ign] "=d" (ignored), "+m" (*req)
		: [req] "a" (req)
		: "cc");
	return cc;
}

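/*
 * CLP command blocks are CLP_BLK_SIZE bytes and are allocated as whole
 * pages, which keeps each block physically contiguous and page aligned;
 * the firmware interface presumably requires at least that alignment.
 */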
static void *clp_alloc_block(gfp_t gfp_mask)
{
	return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
}

static void clp_free_block(void *ptr)
{
	free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
}

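/*
 * Copy the function group attributes from a CLP response into zdev. As
 * the field names suggest: refresh - TLB refresh behaviour, dasm - DMA
 * address space mask, msia - MSI address, noi - number of interrupts,
 * mui - function measurement block update interval.
 */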
static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
				      struct clp_rsp_query_pci_grp *response)
{
	zdev->tlb_refresh = response->refresh;
	zdev->dma_mask = response->dasm;
	zdev->msi_addr = response->msia;
	zdev->max_msi = response->noi;
	zdev->fmb_update = response->mui;

	switch (response->version) {
	case 1:
		zdev->max_bus_speed = PCIE_SPEED_5_0GT;
		break;
	default:
		zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
		break;
	}
}

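/*
 * Query the function group @pfgid via CLP and store the attributes in
 * @zdev. Returns 0 on success, -ENOMEM if no command block could be
 * allocated or -EIO for a failed CLP request.
 */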
static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
{
	struct clp_req_rsp_query_pci_grp *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.pfgid = pfgid;

	rc = clp_instr(rrb);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		clp_store_query_pci_fngrp(zdev, &rrb->response);
	} else {
		zpci_err("Q PCI FGRP:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}

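/*
 * Copy the per-function attributes from a CLP response into zdev: BAR
 * values and sizes, the usable DMA address range, pchid, function group
 * ID, function type, VFN, UID, function path (pfip) and, if available,
 * the utility string.
 */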
static int clp_store_query_pci_fn(struct zpci_dev *zdev,
				  struct clp_rsp_query_pci *response)
{
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		zdev->bars[i].val = le32_to_cpu(response->bar[i]);
		zdev->bars[i].size = response->bar_size[i];
	}
	zdev->start_dma = response->sdma;
	zdev->end_dma = response->edma;
	zdev->pchid = response->pchid;
	zdev->pfgid = response->pfgid;
	zdev->pft = response->pft;
	zdev->vfn = response->vfn;
	zdev->uid = response->uid;

	memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
	if (response->util_str_avail) {
		memcpy(zdev->util_str, response->util_str,
		       sizeof(zdev->util_str));
	}

	return 0;
}

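/*
 * Query the attributes of the PCI function behind handle @fh and store
 * them in @zdev. If the function reports a function group, the group
 * attributes are queried as well. Same return codes as above.
 */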
static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
{
	struct clp_req_rsp_query_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.fh = fh;

	rc = clp_instr(rrb);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		rc = clp_store_query_pci_fn(zdev, &rrb->response);
		if (rc)
			goto out;
		if (rrb->response.pfgid)
			rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
	} else {
		zpci_err("Q PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
out:
	clp_free_block(rrb);
	return rc;
}

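/*
 * Allocate a zpci_dev for the function @fid/@fh, fill it via the CLP
 * queries above, set its state according to @configured and register it
 * with zpci_create_device(). On failure the half-initialized device is
 * freed again.
 */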
int clp_add_pci_device(u32 fid, u32 fh, int configured)
{
	struct zpci_dev *zdev;
	int rc;

	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return -ENOMEM;

	zdev->fh = fh;
	zdev->fid = fid;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev, fh);
	if (rc)
		goto error;

	if (configured)
		zdev->state = ZPCI_FN_STATE_CONFIGURED;
	else
		zdev->state = ZPCI_FN_STATE_STANDBY;

	rc = zpci_create_device(zdev);
	if (rc)
		goto error;
	return 0;

error:
	kfree(zdev);
	return rc;
}

/*
 * Enable/Disable a given PCI function defined by its function handle.
 */
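/*
 * The firmware may answer a set request with CLP_RC_SETPCIFN_BUSY while
 * a previous operation on the function is still pending. In that case
 * the request is retried with a 20 msec sleep in between, for up to 100
 * retries (roughly two seconds), before giving up.
 */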
static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
{
	struct clp_req_rsp_set_pci *rrb;
	int rc, retries = 100;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_SET_PCI_FN;
		rrb->response.hdr.len = sizeof(rrb->response);
		rrb->request.fh = *fh;
		rrb->request.oc = command;
		rrb->request.ndas = nr_dma_as;

		rc = clp_instr(rrb);
		if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
			retries--;
			if (retries < 0)
				break;
			msleep(20);
		}
	} while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);

	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		*fh = rrb->response.fh;
	} else {
		zpci_err("Set PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}

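/*
 * A minimal usage sketch for the two helpers below (hypothetical caller;
 * ZPCI_NR_DMA_SPACES is assumed to name the number of DMA address spaces,
 * as used elsewhere in the s390 PCI core):
 *
 *	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
 *	if (rc)
 *		return rc;
 */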
int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
{
	u32 fh = zdev->fh;
	int rc;

	rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
	if (!rc)
		/* Success -> store enabled handle in zdev */
		zdev->fh = fh;

	zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
	return rc;
}

int clp_disable_fh(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int rc;

	if (!zdev_enabled(zdev))
		return 0;

	rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
	if (!rc)
		/* Success -> store disabled handle in zdev */
		zdev->fh = fh;

	zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
	return rc;
}

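/*
 * Walk the complete list of PCI functions known to the firmware. One CLP
 * request returns at most one command block worth of entries; a non-zero
 * resume_token in the response indicates that more entries follow, so
 * the request is repeated until the token comes back as zero. @cb is
 * invoked for every entry returned.
 */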
static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
			void (*cb)(struct clp_fh_list_entry *entry))
{
	u64 resume_token = 0;
	int entries, i, rc;

	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_LIST_PCI;
		/* store as many entries as possible */
		rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
		rrb->request.resume_token = resume_token;

		/* Get PCI function handle list */
		rc = clp_instr(rrb);
		if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
			zpci_err("List PCI FN:\n");
			zpci_err_clp(rrb->response.hdr.rsp, rc);
			rc = -EIO;
			goto out;
		}

		WARN_ON_ONCE(rrb->response.entry_size !=
			sizeof(struct clp_fh_list_entry));

		entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
			rrb->response.entry_size;

		resume_token = rrb->response.resume_token;
		for (i = 0; i < entries; i++)
			cb(&rrb->response.fh_list[i]);
	} while (resume_token);
out:
	return rc;
}

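/*
 * Callbacks for clp_list_pci(). Entries with a zero vendor ID are skipped
 * throughout; they presumably denote unpopulated slots in the firmware's
 * list.
 */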
static void __clp_add(struct clp_fh_list_entry *entry)
{
	if (!entry->vendor_id)
		return;

	clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
}

static void __clp_rescan(struct clp_fh_list_entry *entry)
{
	struct zpci_dev *zdev;

	if (!entry->vendor_id)
		return;

	zdev = get_zdev_by_fid(entry->fid);
	if (!zdev) {
		clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
		return;
	}

	if (!entry->config_state) {
		/*
		 * The handle is already disabled, so the firmware interfaces
		 * can no longer be used to free the IOTA and IRQ resources.
		 * Free the remaining resources (DMA memory, debug, sysfs)
		 * manually.
		 */
		zpci_stop_device(zdev);
	}
}

static void __clp_update(struct clp_fh_list_entry *entry)
{
	struct zpci_dev *zdev;

	if (!entry->vendor_id)
		return;

	zdev = get_zdev_by_fid(entry->fid);
	if (!zdev)
		return;

	zdev->fh = entry->fh;
}

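/*
 * The entry points below walk the firmware's function list with different
 * callbacks: clp_scan_pci_devices() adds every populated entry (initial
 * scan), clp_rescan_pci_devices() also picks up new functions and stops
 * devices that are no longer configured, and
 * clp_rescan_pci_devices_simple() merely refreshes the cached function
 * handles.
 */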
int clp_scan_pci_devices(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_add);

	clp_free_block(rrb);
	return rc;
}

int clp_rescan_pci_devices(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_rescan);

	clp_free_block(rrb);
	return rc;
}

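/*
 * Note: this variant allocates its command block with GFP_NOWAIT,
 * presumably because it may be called from contexts that must not sleep;
 * the callback does nothing but update the cached function handles.
 */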
int clp_rescan_pci_devices_simple(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_NOWAIT);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_update);

	clp_free_block(rrb);
	return rc;
}