// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *	Jan Glauber <jang@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <asm/asm-extable.h>
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>
#include <asm/asm.h>
#include <asm/clp.h>
#include <uapi/asm/clp.h>

#include "pci_bus.h"

bool zpci_unique_uid;

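/*
 * Track whether the platform performs UID checking, as reported by the
 * CLP List PCI response; only emit a debug message when the setting
 * changes.
 */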
void update_uid_checking(bool new)
{
	if (zpci_unique_uid != new)
		zpci_dbg(3, "uid checking:%d\n", new);

	zpci_unique_uid = new;
}

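/* Log a CLP response code / return code pair to the zpci error log. */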
static inline void zpci_err_clp(unsigned int rsp, int rc)
{
	struct {
		unsigned int rsp;
		int rc;
	} __packed data = {rsp, rc};

	zpci_err_hex(&data, sizeof(data));
}

/*
 * Call Logical Processor with c=1, lps=0 and command 1
 * to get the bit mask of installed logical processors
 */
static inline int clp_get_ilp(unsigned long *ilp)
{
	unsigned long mask;
	int cc, exception;

	exception = 1;
	asm_inline volatile (
		"	.insn	rrf,0xb9a00000,%[mask],%[cmd],8,0\n"
		"0:	lhi	%[exc],0\n"
		"1:\n"
		CC_IPM(cc)
		EX_TABLE(0b, 1b)
		: CC_OUT(cc, cc), [mask] "=d" (mask), [exc] "+d" (exception)
		: [cmd] "a" (1)
		: CC_CLOBBER);
	*ilp = mask;
	return exception ? 3 : CC_TRANSFORM(cc);
}

/*
 * Call Logical Processor with c=0, the given constant lps and an lpcb request.
 */
static __always_inline int clp_req(void *data, unsigned int lps)
{
	struct { u8 _[CLP_BLK_SIZE]; } *req = data;
	int cc, exception;
	u64 ignored;

	exception = 1;
	asm_inline volatile (
		"	.insn	rrf,0xb9a00000,%[ign],%[req],0,%[lps]\n"
		"0:	lhi	%[exc],0\n"
		"1:\n"
		CC_IPM(cc)
		EX_TABLE(0b, 1b)
		: CC_OUT(cc, cc), [ign] "=d" (ignored), "+m" (*req), [exc] "+d" (exception)
		: [req] "a" (req), [lps] "i" (lps)
		: CC_CLOBBER);
	return exception ? 3 : CC_TRANSFORM(cc);
}

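/*
 * CLP request/response blocks are CLP_BLK_SIZE bytes and are allocated
 * and freed as whole pages.
 */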
static void *clp_alloc_block(gfp_t gfp_mask)
{
	return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
}

static void clp_free_block(void *ptr)
{
	free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
}

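/*
 * Cache the attributes of a successful Query PCI Function Group response
 * in the zpci_dev: TLB refresh requirement, DMA mask, MSI address and
 * count, measurement block update interval and further group properties.
 */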
static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
				      struct clp_rsp_query_pci_grp *response)
{
	zdev->tlb_refresh = response->refresh;
	zdev->dma_mask = response->dasm;
	zdev->msi_addr = response->msia;
	zdev->max_msi = response->noi;
	zdev->fmb_update = response->mui;
	zdev->version = response->version;
	zdev->maxstbl = response->maxstbl;
	zdev->dtsm = response->dtsm;
	zdev->rtr_avail = response->rtr;

	switch (response->version) {
	case 1:
		zdev->max_bus_speed = PCIE_SPEED_5_0GT;
		break;
	default:
		zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
		break;
	}
}

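/*
 * Query the function group @pfgid via CLP and store the result through
 * clp_store_query_pci_fngrp(). Returns 0 on success, -ENOMEM if no CLP
 * block could be allocated, -EIO on a CLP error.
 */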
static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
{
	struct clp_req_rsp_query_pci_grp *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.pfgid = pfgid;

	rc = clp_req(rrb, CLP_LPS_PCI);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		clp_store_query_pci_fngrp(zdev, &rrb->response);
	} else {
		zpci_err("Q PCI FGRP:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}

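/*
 * Copy the attributes of a successful Query PCI Function response into
 * the zpci_dev: BAR values and sizes, the DMA address range, PCHID,
 * function group/type, UID, RID and TID where available, the utility
 * string and the MIO address list.
 */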
static int clp_store_query_pci_fn(struct zpci_dev *zdev,
				  struct clp_rsp_query_pci *response)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		zdev->bars[i].val = le32_to_cpu(response->bar[i]);
		zdev->bars[i].size = response->bar_size[i];
	}
	zdev->start_dma = response->sdma;
	zdev->end_dma = response->edma;
	zdev->pchid = response->pchid;
	zdev->pfgid = response->pfgid;
	zdev->pft = response->pft;
	zdev->vfn = response->vfn;
	zdev->port = response->port;
	zdev->fidparm = response->fidparm;
	zdev->uid = response->uid;
	zdev->fmb_length = sizeof(u32) * response->fmb_len;
	zdev->is_physfn = response->is_physfn;
	zdev->rid_available = response->rid_avail;
	if (zdev->rid_available)
		zdev->rid = response->rid;
	zdev->tid_avail = response->tid_avail;
	if (zdev->tid_avail)
		zdev->tid = response->tid;

	memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
	if (response->util_str_avail) {
		memcpy(zdev->util_str, response->util_str,
		       sizeof(zdev->util_str));
		zdev->util_str_avail = 1;
	}
	zdev->mio_capable = response->mio_addr_avail;
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!(response->mio.valid & (1 << (PCI_STD_NUM_BARS - i - 1))))
			continue;

		zdev->bars[i].mio_wb = (void __iomem *) response->mio.addr[i].wb;
		zdev->bars[i].mio_wt = (void __iomem *) response->mio.addr[i].wt;
	}
	return 0;
}

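/**
 * clp_query_pci_fn() - Query a PCI function and its function group
 * @zdev: Function to query via its current function handle
 *
 * Stores the queried attributes in @zdev and additionally queries the
 * function's group attributes.
 *
 * Returns: 0 on success, -ENOMEM if no CLP block could be allocated,
 * -EIO on a CLP error
 */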
int clp_query_pci_fn(struct zpci_dev *zdev)
{
	struct clp_req_rsp_query_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.fh = zdev->fh;

	rc = clp_req(rrb, CLP_LPS_PCI);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		rc = clp_store_query_pci_fn(zdev, &rrb->response);
		if (rc)
			goto out;
		rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
	} else {
		zpci_err("Q PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
out:
	clp_free_block(rrb);
	return rc;
}

/**
 * clp_set_pci_fn() - Execute a command on a PCI function
 * @zdev: Function that will be affected
 * @fh: Out parameter for updated function handle
 * @nr_dma_as: DMA address space number
 * @command: The command code to execute
 *
 * Returns: 0 on success, < 0 for Linux errors (e.g. -ENOMEM), and
 * > 0 for non-success platform responses
 */
static int clp_set_pci_fn(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as, u8 command)
{
	struct clp_req_rsp_set_pci *rrb;
	int rc, retries = 100;
	u32 gisa = 0;

	*fh = 0;
	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	if (command != CLP_SET_DISABLE_PCI_FN)
		gisa = zdev->gisa;

	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_SET_PCI_FN;
		rrb->response.hdr.len = sizeof(rrb->response);
		rrb->request.fh = zdev->fh;
		rrb->request.oc = command;
		rrb->request.ndas = nr_dma_as;
		rrb->request.gisa = gisa;

		rc = clp_req(rrb, CLP_LPS_PCI);
		if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
			retries--;
			if (retries < 0)
				break;
			msleep(20);
		}
	} while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);

	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		*fh = rrb->response.fh;
	} else {
		zpci_err("Set PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		if (!rc)
			rc = rrb->response.hdr.rsp;
	}
	clp_free_block(rrb);
	return rc;
}

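/*
 * Issue the CLP store-logical-processor-characteristics (SLPC) command
 * for PCI and, if a write-back bit is reported (vwb), record its position
 * in mio_wb_bit_mask. Returns 0 on success, -ENOMEM or -EIO on failure.
 */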
int clp_setup_writeback_mio(void)
{
	struct clp_req_rsp_slpc_pci *rrb;
	u8 wb_bit_pos;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_SLPC;
	rrb->response.hdr.len = sizeof(rrb->response);

	rc = clp_req(rrb, CLP_LPS_PCI);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		if (rrb->response.vwb) {
			wb_bit_pos = rrb->response.mio_wb;
			set_bit_inv(wb_bit_pos, &mio_wb_bit_mask);
			zpci_dbg(3, "wb bit: %d\n", wb_bit_pos);
		} else {
			zpci_dbg(3, "wb bit: n.a.\n");
		}
	} else {
		zpci_err("SLPC PCI:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}

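/**
 * clp_enable_fh() - Enable a PCI function
 * @zdev: Function to enable
 * @fh: Out parameter for the updated function handle
 * @nr_dma_as: Number of DMA address spaces to request
 *
 * If the function supports MIO, MIO instructions are enabled as well;
 * should that fail, the function is disabled again.
 *
 * Returns: 0 on success, < 0 for Linux errors and > 0 for non-success
 * platform responses (see clp_set_pci_fn())
 */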
int clp_enable_fh(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as)
{
	int rc;

	rc = clp_set_pci_fn(zdev, fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
	zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, *fh, rc);
	if (!rc && zpci_use_mio(zdev)) {
		rc = clp_set_pci_fn(zdev, fh, nr_dma_as, CLP_SET_ENABLE_MIO);
		zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n",
			 zdev->fid, *fh, rc);
		if (rc)
			clp_disable_fh(zdev, fh);
	}
	return rc;
}

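/**
 * clp_disable_fh() - Disable a PCI function
 * @zdev: Function to disable
 * @fh: Out parameter for the updated function handle
 *
 * Does nothing and returns 0 if the function is already disabled.
 *
 * Returns: 0 on success, < 0 for Linux errors and > 0 for non-success
 * platform responses (see clp_set_pci_fn())
 */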
int clp_disable_fh(struct zpci_dev *zdev, u32 *fh)
{
	int rc;

	if (!zdev_enabled(zdev))
		return 0;

	rc = clp_set_pci_fn(zdev, fh, 0, CLP_SET_DISABLE_PCI_FN);
	zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, *fh, rc);
	return rc;
}

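/*
 * Issue a single CLP List PCI request starting at *@resume_token. On
 * success *@nentries is set to the number of returned function handle
 * list entries and *@resume_token is updated for the next iteration
 * (0 means the list is complete).
 */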
static int clp_list_pci_req(struct clp_req_rsp_list_pci *rrb,
			    u64 *resume_token, int *nentries)
{
	int rc;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_LIST_PCI;
	/* store as many entries as possible */
	rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
	rrb->request.resume_token = *resume_token;

	/* Get PCI function handle list */
	rc = clp_req(rrb, CLP_LPS_PCI);
	if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
		zpci_err("List PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		return -EIO;
	}

	update_uid_checking(rrb->response.uid_checking);
	WARN_ON_ONCE(rrb->response.entry_size !=
		     sizeof(struct clp_fh_list_entry));

	*nentries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
		    rrb->response.entry_size;
	*resume_token = rrb->response.resume_token;

	return rc;
}

static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, void *data,
			void (*cb)(struct clp_fh_list_entry *, void *))
{
	u64 resume_token = 0;
	int nentries, i, rc;

	do {
		rc = clp_list_pci_req(rrb, &resume_token, &nentries);
		if (rc)
			return rc;
		for (i = 0; i < nentries; i++)
			cb(&rrb->response.fh_list[i], data);
	} while (resume_token);

	return rc;
}

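/*
 * Walk the CLP List PCI entries looking for the function with ID @fid
 * and copy its list entry to *@entry. Returns 0 if found, -ENODEV if no
 * entry matches, -EIO on a CLP error.
 */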
static int clp_find_pci(struct clp_req_rsp_list_pci *rrb, u32 fid,
			struct clp_fh_list_entry *entry)
{
	struct clp_fh_list_entry *fh_list;
	u64 resume_token = 0;
	int nentries, i, rc;

	do {
		rc = clp_list_pci_req(rrb, &resume_token, &nentries);
		if (rc)
			return rc;
		fh_list = rrb->response.fh_list;
		for (i = 0; i < nentries; i++) {
			if (fh_list[i].fid == fid) {
				*entry = fh_list[i];
				return 0;
			}
		}
	} while (resume_token);

	return -ENODEV;
}

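/*
 * clp_list_pci() callback: create a zpci_dev for any function that is not
 * yet known, skipping entries without a vendor ID. New devices are
 * collected on the scan list passed as @data.
 */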
static void __clp_add(struct clp_fh_list_entry *entry, void *data)
{
	struct list_head *scan_list = data;
	struct zpci_dev *zdev;

	if (!entry->vendor_id)
		return;

	zdev = get_zdev_by_fid(entry->fid);
	if (zdev) {
		zpci_zdev_put(zdev);
		return;
	}
	zdev = zpci_create_device(entry->fid, entry->fh, entry->config_state);
	if (IS_ERR(zdev))
		return;
	list_add_tail(&zdev->entry, scan_list);
}

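/**
 * clp_scan_pci_devices() - List all PCI functions and create zpci_devs
 * @scan_list: List to which newly created zpci_devs are added
 *
 * Returns: 0 on success, -ENOMEM if no CLP block could be allocated,
 * -EIO on a CLP error
 */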
int clp_scan_pci_devices(struct list_head *scan_list)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, scan_list, __clp_add);

	clp_free_block(rrb);
	return rc;
}

/*
 * Get the current function handle of the function matching @fid
 */
int clp_refresh_fh(u32 fid, u32 *fh)
{
	struct clp_req_rsp_list_pci *rrb;
	struct clp_fh_list_entry entry;
	int rc;

	rrb = clp_alloc_block(GFP_NOWAIT);
	if (!rrb)
		return -ENOMEM;

	rc = clp_find_pci(rrb, fid, &entry);
	if (!rc)
		*fh = entry.fh;

	clp_free_block(rrb);
	return rc;
}

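/*
 * Look up the configuration state of the function matching @fid. A
 * function that no longer shows up in the CLP list is reported as
 * ZPCI_FN_STATE_RESERVED rather than as an error.
 */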
int clp_get_state(u32 fid, enum zpci_state *state)
{
	struct clp_req_rsp_list_pci *rrb;
	struct clp_fh_list_entry entry;
	int rc;

	rrb = clp_alloc_block(GFP_ATOMIC);
	if (!rrb)
		return -ENOMEM;

	rc = clp_find_pci(rrb, fid, &entry);
	if (!rc) {
		*state = entry.config_state;
	} else if (rc == -ENODEV) {
		*state = ZPCI_FN_STATE_RESERVED;
		rc = 0;
	}

	clp_free_block(rrb);
	return rc;
}

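/*
 * The handlers below validate CLP request blocks passed in through the
 * clp misc device before issuing the actual CLP call: the request header
 * length must match and, where defined, reserved fields must be zero.
 */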
static int clp_base_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_BASE) ? -EOPNOTSUPP : 0;
}

static int clp_base_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
	switch (lpcb->cmd) {
	case 0x0001: /* store logical-processor characteristics */
		return clp_base_slpc(req, (void *) lpcb);
	default:
		return -EINVAL;
	}
}

static int clp_pci_slpc(struct clp_req *req, struct clp_req_rsp_slpc_pci *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_list(struct clp_req *req, struct clp_req_rsp_list_pci *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	if (lpcb->request.reserved2 != 0)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_query(struct clp_req *req,
			 struct clp_req_rsp_query_pci *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_query_grp(struct clp_req *req,
			     struct clp_req_rsp_query_pci_grp *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0 ||
	    lpcb->request.reserved4 != 0)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
	switch (lpcb->cmd) {
	case 0x0001: /* store logical-processor characteristics */
		return clp_pci_slpc(req, (void *) lpcb);
	case 0x0002: /* list PCI functions */
		return clp_pci_list(req, (void *) lpcb);
	case 0x0003: /* query PCI function */
		return clp_pci_query(req, (void *) lpcb);
	case 0x0004: /* query PCI function group */
		return clp_pci_query_grp(req, (void *) lpcb);
	default:
		return -EINVAL;
	}
}

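/*
 * Handle a synchronous CLP request passed in from userspace: copy in one
 * page of request data, dispatch by logical-processor set (0 = base,
 * 2 = PCI) and copy the response back on success.
 */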
static int clp_normal_command(struct clp_req *req)
{
	struct clp_req_hdr *lpcb;
	void __user *uptr;
	int rc;

	rc = -EINVAL;
	if (req->lps != 0 && req->lps != 2)
		goto out;

	rc = -ENOMEM;
	lpcb = clp_alloc_block(GFP_KERNEL);
	if (!lpcb)
		goto out;

	rc = -EFAULT;
	uptr = (void __force __user *)(unsigned long) req->data_p;
	if (copy_from_user(lpcb, uptr, PAGE_SIZE) != 0)
		goto out_free;

	rc = -EINVAL;
	if (lpcb->fmt != 0 || lpcb->reserved1 != 0 || lpcb->reserved2 != 0)
		goto out_free;

	switch (req->lps) {
	case 0:
		rc = clp_base_command(req, lpcb);
		break;
	case 2:
		rc = clp_pci_command(req, lpcb);
		break;
	}
	if (rc)
		goto out_free;

	rc = -EFAULT;
	if (copy_to_user(uptr, lpcb, PAGE_SIZE) != 0)
		goto out_free;

	rc = 0;

out_free:
	clp_free_block(lpcb);
out:
	return rc;
}

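/*
 * Handle the immediate (c=1) CLP commands, which are answered from the
 * installed logical-processor mask without a request block: command 0
 * tests for a specific logical-processor set, command 1 returns the
 * whole bit mask.
 */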
static int clp_immediate_command(struct clp_req *req)
{
	void __user *uptr;
	unsigned long ilp;
	int exists;

	if (req->cmd > 1 || clp_get_ilp(&ilp) != 0)
		return -EINVAL;

	uptr = (void __force __user *)(unsigned long) req->data_p;
	if (req->cmd == 0) {
		/* Command code 0: test for a specific processor */
		exists = test_bit_inv(req->lps, &ilp);
		return put_user(exists, (int __user *) uptr);
	}
	/* Command code 1: return bit mask of installed processors */
	return put_user(ilp, (unsigned long __user *) uptr);
}

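/*
 * ioctl handler of the clp misc device. Only CLP_SYNC is supported; the
 * request is dispatched to the immediate or normal command path depending
 * on its c flag.
 */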
static long clp_misc_ioctl(struct file *filp, unsigned int cmd,
			   unsigned long arg)
{
	struct clp_req req;
	void __user *argp;

	if (cmd != CLP_SYNC)
		return -EINVAL;

	argp = is_compat_task() ? compat_ptr(arg) : (void __user *) arg;
	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;
	if (req.r != 0)
		return -EINVAL;
	return req.c ? clp_immediate_command(&req) : clp_normal_command(&req);
}

static int clp_misc_release(struct inode *inode, struct file *filp)
{
	return 0;
}

static const struct file_operations clp_misc_fops = {
	.owner = THIS_MODULE,
	.open = nonseekable_open,
	.release = clp_misc_release,
	.unlocked_ioctl = clp_misc_ioctl,
	.compat_ioctl = clp_misc_ioctl,
};

static struct miscdevice clp_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "clp",
	.fops = &clp_misc_fops,
};

builtin_misc_device(clp_misc_device);