// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for IBM Power 842 compression accelerator
 *
 * Copyright (C) IBM Corporation, 2012
 *
 * Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
 *          Seth Jennings <sjenning@linux.vnet.ibm.com>
 */

#include <asm/vio.h>
#include <asm/hvcall.h>
#include <asm/vas.h>

#include "nx-842.h"
#include "nx_csbcpb.h" /* struct nx_csbcpb */

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
MODULE_ALIAS_CRYPTO("842");
MODULE_ALIAS_CRYPTO("842-nx");

/*
 * Coprocessor type specific capabilities from the hypervisor.
 */
struct hv_nx_cop_caps {
	__be64	descriptor;
	__be64	req_max_processed_len;	/* Max bytes in one GZIP request */
	__be64	min_compress_len;	/* Min compression size in bytes */
	__be64	min_decompress_len;	/* Min decompression size in bytes */
} __packed __aligned(0x1000);

/*
 * Coprocessor type specific capabilities.
 */
struct nx_cop_caps {
	u64	descriptor;
	u64	req_max_processed_len;	/* Max bytes in one GZIP request */
	u64	min_compress_len;	/* Min compression in bytes */
	u64	min_decompress_len;	/* Min decompression in bytes */
};

static u64 caps_feat;
static struct nx_cop_caps nx_cop_caps;

static struct nx842_constraints nx842_pseries_constraints = {
	.alignment =	DDE_BUFFER_ALIGN,
	.multiple =	DDE_BUFFER_LAST_MULT,
	.minimum =	DDE_BUFFER_LAST_MULT,
	.maximum =	PAGE_SIZE, /* dynamic, max_sync_size */
};

static int check_constraints(unsigned long buf, unsigned int *len, bool in)
{
	if (!IS_ALIGNED(buf, nx842_pseries_constraints.alignment)) {
		pr_debug("%s buffer 0x%lx not aligned to 0x%x\n",
			 in ? "input" : "output", buf,
			 nx842_pseries_constraints.alignment);
		return -EINVAL;
	}
	if (*len % nx842_pseries_constraints.multiple) {
		pr_debug("%s buffer len 0x%x not multiple of 0x%x\n",
			 in ? "input" : "output", *len,
			 nx842_pseries_constraints.multiple);
		if (in)
			return -EINVAL;
		*len = round_down(*len, nx842_pseries_constraints.multiple);
	}
	if (*len < nx842_pseries_constraints.minimum) {
		pr_debug("%s buffer len 0x%x under minimum 0x%x\n",
			 in ? "input" : "output", *len,
			 nx842_pseries_constraints.minimum);
		return -EINVAL;
	}
	if (*len > nx842_pseries_constraints.maximum) {
		pr_debug("%s buffer len 0x%x over maximum 0x%x\n",
			 in ? "input" : "output", *len,
			 nx842_pseries_constraints.maximum);
		if (in)
			return -EINVAL;
		*len = nx842_pseries_constraints.maximum;
	}
	return 0;
}
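
/*
 * Note on check_constraints() (descriptive, not a formal contract): an input
 * buffer must satisfy every constraint exactly, while an output buffer is
 * fixed up where possible.  A length that is not a multiple of
 * DDE_BUFFER_LAST_MULT is rounded down, and a length above the current
 * maximum is clamped to it; only alignment and minimum length violations
 * reject an output buffer outright.
 */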

/* I assume we need to align the CSB? */
#define WORKMEM_ALIGN	(256)

struct nx842_workmem {
	/* scatterlist */
	char slin[4096];
	char slout[4096];
	/* coprocessor status/parameter block */
	struct nx_csbcpb csbcpb;

	char padding[WORKMEM_ALIGN];
} __aligned(WORKMEM_ALIGN);

/* Macros for fields within nx_csbcpb */
/* Check the valid bit within the csbcpb valid field */
#define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7))

/* CE macros operate on the completion_extension field bits in the csbcpb.
 * CE0 0=full completion, 1=partial completion
 * CE1 0=CE0 indicates completion, 1=termination (output may be modified)
 * CE2 0=processed_bytes is source bytes, 1=processed_bytes is target bytes
 */
#define NX842_CSBCPB_CE0(x)	(x & BIT_MASK(7))
#define NX842_CSBCPB_CE1(x)	(x & BIT_MASK(6))
#define NX842_CSBCPB_CE2(x)	(x & BIT_MASK(5))

/* The NX unit accepts data only on 4K page boundaries */
#define NX842_HW_PAGE_SIZE	(4096)
#define NX842_HW_PAGE_MASK	(~(NX842_HW_PAGE_SIZE-1))

struct ibm_nx842_counters {
	atomic64_t comp_complete;
	atomic64_t comp_failed;
	atomic64_t decomp_complete;
	atomic64_t decomp_failed;
	atomic64_t swdecomp;
	atomic64_t comp_times[32];
	atomic64_t decomp_times[32];
};

static struct nx842_devdata {
	struct vio_dev *vdev;
	struct device *dev;
	struct ibm_nx842_counters *counters;
	unsigned int max_sg_len;
	unsigned int max_sync_size;
	unsigned int max_sync_sg;
} __rcu *devdata;
static DEFINE_SPINLOCK(devdata_mutex);

#define NX842_COUNTER_INC(_x) \
static inline void nx842_inc_##_x( \
	const struct nx842_devdata *dev) { \
	if (dev) \
		atomic64_inc(&dev->counters->_x); \
}
NX842_COUNTER_INC(comp_complete);
NX842_COUNTER_INC(comp_failed);
NX842_COUNTER_INC(decomp_complete);
NX842_COUNTER_INC(decomp_failed);
NX842_COUNTER_INC(swdecomp);

#define NX842_HIST_SLOTS 16

static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time)
{
	int bucket = fls(time);

	if (bucket)
		bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);

	atomic64_inc(&times[bucket]);
}
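
/*
 * Example of the bucketing above (illustrative): a 5us operation gives
 * fls(5) = 3, so bucket 2 is incremented, which nx842_timehist_show()
 * reports as the 4-7us slot.  Anything taking 2<<(NX842_HIST_SLOTS - 2)
 * microseconds or longer lands in the last bucket.
 */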

/* NX unit operation flags */
#define NX842_OP_COMPRESS	0x0
#define NX842_OP_CRC		0x1
#define NX842_OP_DECOMPRESS	0x2
#define NX842_OP_COMPRESS_CRC   (NX842_OP_COMPRESS | NX842_OP_CRC)
#define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC)
#define NX842_OP_ASYNC		(1<<23)
#define NX842_OP_NOTIFY		(1<<22)
#define NX842_OP_NOTIFY_INT(x)	((x & 0xff)<<8)

static unsigned long nx842_get_desired_dma(struct vio_dev *viodev)
{
	/* No use of DMA mappings within the driver. */
	return 0;
}

struct nx842_slentry {
	__be64 ptr; /* Real address (use __pa()) */
	__be64 len;
};

/* pHyp scatterlist entry */
struct nx842_scatterlist {
	int entry_nr; /* number of slentries */
	struct nx842_slentry *entries; /* ptr to array of slentries */
};

/* Does not include sizeof(entry_nr) in the size */
static inline unsigned long nx842_get_scatterlist_size(
				struct nx842_scatterlist *sl)
{
	return sl->entry_nr * sizeof(struct nx842_slentry);
}

static int nx842_build_scatterlist(unsigned long buf, int len,
			struct nx842_scatterlist *sl)
{
	unsigned long entrylen;
	struct nx842_slentry *entry;

	sl->entry_nr = 0;

	entry = sl->entries;
	while (len) {
		entry->ptr = cpu_to_be64(nx842_get_pa((void *)buf));
		entrylen = min_t(int, len,
				 LEN_ON_SIZE(buf, NX842_HW_PAGE_SIZE));
		entry->len = cpu_to_be64(entrylen);

		len -= entrylen;
		buf += entrylen;

		sl->entry_nr++;
		entry++;
	}

	return 0;
}
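
/*
 * Each slentry describes at most the remainder of the 4K hardware page that
 * buf currently points into (assuming LEN_ON_SIZE() in nx-842.h returns the
 * bytes left before the next NX842_HW_PAGE_SIZE boundary).  As an
 * illustration, an 8192-byte buffer that starts 256 bytes into a page is
 * described by three entries of 3840, 4096 and 256 bytes.
 */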

static int nx842_validate_result(struct device *dev,
			struct cop_status_block *csb)
{
	/* The csb must be valid after returning from vio_h_cop_sync */
	if (!NX842_CSBCBP_VALID_CHK(csb->valid)) {
		dev_err(dev, "%s: csbcpb not valid upon completion.\n",
				__func__);
		dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n",
				csb->valid,
				csb->crb_seq_number,
				csb->completion_code,
				csb->completion_extension);
		dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n",
				be32_to_cpu(csb->processed_byte_count),
				(unsigned long)be64_to_cpu(csb->address));
		return -EIO;
	}

	/* Check return values from the hardware in the CSB */
	switch (csb->completion_code) {
	case 0:	/* Completed without error */
		break;
	case 64: /* Compression ok, but output larger than input */
		dev_dbg(dev, "%s: output size larger than input size\n",
					__func__);
		break;
	case 13: /* Output buffer too small */
		dev_dbg(dev, "%s: Out of space in output buffer\n",
					__func__);
		return -ENOSPC;
	case 65: /* Calculated CRC doesn't match the passed value */
		dev_dbg(dev, "%s: CRC mismatch for decompression\n",
					__func__);
		return -EINVAL;
	case 66: /* Input data contains an illegal template field */
	case 67: /* Template indicates data past the end of the input stream */
		dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n",
					__func__, csb->completion_code);
		return -EINVAL;
	default:
		dev_dbg(dev, "%s: Unspecified error (code:%d)\n",
					__func__, csb->completion_code);
		return -EIO;
	}

	/* Hardware sanity check */
	if (!NX842_CSBCPB_CE2(csb->completion_extension)) {
		dev_err(dev, "%s: No error returned by hardware, but "
				"data returned is unusable, contact support.\n"
				"(Additional info: csbcpb->processed bytes "
				"does not specify processed bytes for the "
				"target buffer.)\n", __func__);
		return -EIO;
	}

	return 0;
}
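
/*
 * A zero return from nx842_validate_result() also guarantees, through the
 * CE2 check above, that csb->processed_byte_count counts bytes written to
 * the target buffer, which is what the compress and decompress paths below
 * store into *outlen.
 */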

/**
 * nx842_pseries_compress - Compress data using the 842 algorithm
 *
 * Compression provided by the NX842 coprocessor on IBM Power systems.
 * The input buffer is compressed and the result is stored in the
 * provided output buffer.
 *
 * Upon return from this function @outlen contains the length of the
 * compressed data.  If there is an error then @outlen will be 0 and an
 * error will be specified by the return code from this function.
 *
 * @in: Pointer to input buffer
 * @inlen: Length of input buffer
 * @out: Pointer to output buffer
 * @outlen: Length of output buffer
 * @wmem: ptr to buffer for working memory, size determined by
 *        nx842_pseries_driver.workmem_size
 *
 * Returns:
 *   0		Success, output of length @outlen stored in the buffer at @out
 *   -ENOMEM	Unable to allocate internal buffers
 *   -ENOSPC	Output buffer is too small
 *   -EIO	Internal error
 *   -ENODEV	Hardware unavailable
 */
static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
				  unsigned char *out, unsigned int *outlen,
				  void *wmem)
{
	struct nx842_devdata *local_devdata;
	struct device *dev = NULL;
	struct nx842_workmem *workmem;
	struct nx842_scatterlist slin, slout;
	struct nx_csbcpb *csbcpb;
	int ret = 0;
	unsigned long inbuf, outbuf;
	struct vio_pfo_op op = {
		.done = NULL,
		.handle = 0,
		.timeout = 0,
	};
	unsigned long start = get_tb();

	inbuf = (unsigned long)in;
	if (check_constraints(inbuf, &inlen, true))
		return -EINVAL;

	outbuf = (unsigned long)out;
	if (check_constraints(outbuf, outlen, false))
		return -EINVAL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (!local_devdata || !local_devdata->dev) {
		rcu_read_unlock();
		return -ENODEV;
	}
	dev = local_devdata->dev;

	/* Init scatterlist */
	workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);
	slin.entries = (struct nx842_slentry *)workmem->slin;
	slout.entries = (struct nx842_slentry *)workmem->slout;

	/* Init operation */
	op.flags = NX842_OP_COMPRESS_CRC;
	csbcpb = &workmem->csbcpb;
	memset(csbcpb, 0, sizeof(*csbcpb));
	op.csbcpb = nx842_get_pa(csbcpb);

	if ((inbuf & NX842_HW_PAGE_MASK) ==
	    ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.in = nx842_get_pa((void *)inbuf);
		op.inlen = inlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(inbuf, inlen, &slin);
		op.in = nx842_get_pa(slin.entries);
		op.inlen = -nx842_get_scatterlist_size(&slin);
	}

	if ((outbuf & NX842_HW_PAGE_MASK) ==
	    ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.out = nx842_get_pa((void *)outbuf);
		op.outlen = *outlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(outbuf, *outlen, &slout);
		op.out = nx842_get_pa(slout.entries);
		op.outlen = -nx842_get_scatterlist_size(&slout);
	}

	dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
		__func__, (unsigned long)op.in, (long)op.inlen,
		(unsigned long)op.out, (long)op.outlen);

	/* Send request to pHyp */
	ret = vio_h_cop_sync(local_devdata->vdev, &op);

	/* Check for pHyp error */
	if (ret) {
		dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
			__func__, ret, op.hcall_err);
		ret = -EIO;
		goto unlock;
	}

	/* Check for hardware error */
	ret = nx842_validate_result(dev, &csbcpb->csb);
	if (ret)
		goto unlock;

	*outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);
	dev_dbg(dev, "%s: processed_bytes=%d\n", __func__, *outlen);

unlock:
	if (ret)
		nx842_inc_comp_failed(local_devdata);
	else {
		nx842_inc_comp_complete(local_devdata);
		ibm_nx842_incr_hist(local_devdata->counters->comp_times,
			(get_tb() - start) / tb_ticks_per_usec);
	}
	rcu_read_unlock();
	return ret;
}
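
/*
 * Note on the DDE setup above and in the decompression path below: when a
 * buffer sits entirely within one NX842_HW_PAGE_SIZE page, its physical
 * address and (positive) length are handed over as a direct DDE.  Otherwise
 * an indirect DDE is built in the workmem scatterlist area and the negated
 * scatterlist byte size is passed instead, the sign distinguishing a list
 * of entries from the data itself for the vio_h_cop_sync() call.
 */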

/**
 * nx842_pseries_decompress - Decompress data using the 842 algorithm
 *
 * Decompression provided by the NX842 coprocessor on IBM Power systems.
 * The input buffer is decompressed and the result is stored in the
 * provided output buffer.  The size allocated to the output buffer is
 * provided by the caller of this function in @outlen.  Upon return from
 * this function @outlen contains the length of the decompressed data.
 * If there is an error then @outlen will be 0 and an error will be
 * specified by the return code from this function.
 *
 * @in: Pointer to input buffer
 * @inlen: Length of input buffer
 * @out: Pointer to output buffer
 * @outlen: Length of output buffer
 * @wmem: ptr to buffer for working memory, size determined by
 *        nx842_pseries_driver.workmem_size
 *
 * Returns:
 *   0		Success, output of length @outlen stored in the buffer at @out
 *   -ENODEV	Hardware decompression device is unavailable
 *   -ENOMEM	Unable to allocate internal buffers
 *   -ENOSPC	Output buffer is too small
 *   -EINVAL	Bad input data encountered when attempting decompress
 *   -EIO	Internal error
 */
static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
				    unsigned char *out, unsigned int *outlen,
				    void *wmem)
{
	struct nx842_devdata *local_devdata;
	struct device *dev = NULL;
	struct nx842_workmem *workmem;
	struct nx842_scatterlist slin, slout;
	struct nx_csbcpb *csbcpb;
	int ret = 0;
	unsigned long inbuf, outbuf;
	struct vio_pfo_op op = {
		.done = NULL,
		.handle = 0,
		.timeout = 0,
	};
	unsigned long start = get_tb();

	/* Ensure page alignment and size */
	inbuf = (unsigned long)in;
	if (check_constraints(inbuf, &inlen, true))
		return -EINVAL;

	outbuf = (unsigned long)out;
	if (check_constraints(outbuf, outlen, false))
		return -EINVAL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (!local_devdata || !local_devdata->dev) {
		rcu_read_unlock();
		return -ENODEV;
	}
	dev = local_devdata->dev;

	workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN);

	/* Init scatterlist */
	slin.entries = (struct nx842_slentry *)workmem->slin;
	slout.entries = (struct nx842_slentry *)workmem->slout;

	/* Init operation */
	op.flags = NX842_OP_DECOMPRESS_CRC;
	csbcpb = &workmem->csbcpb;
	memset(csbcpb, 0, sizeof(*csbcpb));
	op.csbcpb = nx842_get_pa(csbcpb);

	if ((inbuf & NX842_HW_PAGE_MASK) ==
	    ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.in = nx842_get_pa((void *)inbuf);
		op.inlen = inlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(inbuf, inlen, &slin);
		op.in = nx842_get_pa(slin.entries);
		op.inlen = -nx842_get_scatterlist_size(&slin);
	}

	if ((outbuf & NX842_HW_PAGE_MASK) ==
	    ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) {
		/* Create direct DDE */
		op.out = nx842_get_pa((void *)outbuf);
		op.outlen = *outlen;
	} else {
		/* Create indirect DDE (scatterlist) */
		nx842_build_scatterlist(outbuf, *outlen, &slout);
		op.out = nx842_get_pa(slout.entries);
		op.outlen = -nx842_get_scatterlist_size(&slout);
	}

	dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n",
		__func__, (unsigned long)op.in, (long)op.inlen,
		(unsigned long)op.out, (long)op.outlen);

	/* Send request to pHyp */
	ret = vio_h_cop_sync(local_devdata->vdev, &op);

	/* Check for pHyp error */
	if (ret) {
		dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
			__func__, ret, op.hcall_err);
		goto unlock;
	}

	/* Check for hardware error */
	ret = nx842_validate_result(dev, &csbcpb->csb);
	if (ret)
		goto unlock;

	*outlen = be32_to_cpu(csbcpb->csb.processed_byte_count);

unlock:
	if (ret)
		/* decompress fail */
		nx842_inc_decomp_failed(local_devdata);
	else {
		nx842_inc_decomp_complete(local_devdata);
		ibm_nx842_incr_hist(local_devdata->counters->decomp_times,
			(get_tb() - start) / tb_ticks_per_usec);
	}

	rcu_read_unlock();
	return ret;
}

/**
 * nx842_OF_set_defaults -- Set default (disabled) values for devdata
 *
 * @devdata: struct nx842_devdata to update
 *
 * Returns:
 *  0 on success
 *  -ENOENT if @devdata ptr is NULL
 */
static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
{
	if (devdata) {
		devdata->max_sync_size = 0;
		devdata->max_sync_sg = 0;
		devdata->max_sg_len = 0;
		return 0;
	} else
		return -ENOENT;
}

/**
 * nx842_OF_upd_status -- Check the device info from OF status prop
 *
 * The status property indicates if the accelerator is enabled.  If the
 * device is in the OF tree it indicates that the hardware is present.
 * The status field indicates if the device is enabled when the status
 * is 'okay'.  Otherwise the device driver will be disabled.
 *
 * @devdata: struct nx842_devdata to use for dev_info
 * @prop: struct property pointer containing the status for the update
 *
 * Returns:
 *  0 - Device is available
 *  -ENODEV - Device is not available
 */
static int nx842_OF_upd_status(struct nx842_devdata *devdata,
			       struct property *prop)
{
	const char *status = (const char *)prop->value;

	if (!strncmp(status, "okay", (size_t)prop->length))
		return 0;
	if (!strncmp(status, "disabled", (size_t)prop->length))
		return -ENODEV;
	dev_info(devdata->dev, "%s: unknown status '%s'\n", __func__, status);

	return -EINVAL;
}

/**
 * nx842_OF_upd_maxsglen -- Update the device info from OF maxsglen prop
 *
 * Definition of the 'ibm,max-sg-len' OF property:
 *  This field indicates the maximum byte length of a scatter list
 *  for the platform facility. It is a single cell encoded as with encode-int.
 *
 * Example:
 *  # od -x ibm,max-sg-len
 *  0000000 0000 0ff0
 *
 *  In this example, the maximum byte length of a scatter list is
 *  0x0ff0 (4,080).
 *
 * @devdata: struct nx842_devdata to update
 * @prop: struct property pointer containing the maxsglen for the update
 *
 * Returns:
 *  0 on success
 *  -EINVAL on failure
 */
static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata,
					struct property *prop) {
	int ret = 0;
	const unsigned int maxsglen = of_read_number(prop->value, 1);

	if (prop->length != sizeof(maxsglen)) {
		dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__);
		dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__,
				prop->length, sizeof(maxsglen));
		ret = -EINVAL;
	} else {
		devdata->max_sg_len = min_t(unsigned int,
					    maxsglen, NX842_HW_PAGE_SIZE);
	}

	return ret;
}

/**
 * nx842_OF_upd_maxsyncop -- Update the device info from OF maxsyncop prop
 *
 * Definition of the 'ibm,max-sync-cop' OF property:
 *  Two series of cells.  The first series of cells represents the maximums
 *  that can be synchronously compressed.  The second series of cells
 *  represents the maximums that can be synchronously decompressed.
 *  1. The first cell in each series contains the count of the number of
 *     data length, scatter list elements pairs that follow - each being
 *     of the form
 *    a. One cell data byte length
 *    b. One cell total number of scatter list elements
 *
 * Example:
 *  # od -x ibm,max-sync-cop
 *  0000000 0000 0001 0000 1000 0000 01fe 0000 0001
 *  0000020 0000 1000 0000 01fe
 *
 *  In this example, compression supports 0x1000 (4,096) data byte length
 *  and 0x1fe (510) total scatter list elements.  Decompression supports
 *  0x1000 (4,096) data byte length and 0x1fe (510) total scatter list
 *  elements.
 *
 * @devdata: struct nx842_devdata to update
 * @prop: struct property pointer containing the maxsyncop for the update
 *
 * Returns:
 *  0 on success
 *  -EINVAL on failure
 */
static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata,
					struct property *prop) {
	int ret = 0;
	unsigned int comp_data_limit, decomp_data_limit;
	unsigned int comp_sg_limit, decomp_sg_limit;
	const struct maxsynccop_t {
		__be32 comp_elements;
		__be32 comp_data_limit;
		__be32 comp_sg_limit;
		__be32 decomp_elements;
		__be32 decomp_data_limit;
		__be32 decomp_sg_limit;
	} *maxsynccop;

	if (prop->length != sizeof(*maxsynccop)) {
		dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__);
		dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length,
				sizeof(*maxsynccop));
		ret = -EINVAL;
		goto out;
	}

	maxsynccop = (const struct maxsynccop_t *)prop->value;
	comp_data_limit = be32_to_cpu(maxsynccop->comp_data_limit);
	comp_sg_limit = be32_to_cpu(maxsynccop->comp_sg_limit);
	decomp_data_limit = be32_to_cpu(maxsynccop->decomp_data_limit);
	decomp_sg_limit = be32_to_cpu(maxsynccop->decomp_sg_limit);

	/* Use one limit rather than separate limits for compression and
	 * decompression.  Set a maximum for this so as not to exceed the
	 * size that the header can support and round the value down to
	 * the hardware page size (4K) */
	devdata->max_sync_size = min(comp_data_limit, decomp_data_limit);

	devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size,
					65536);

	if (devdata->max_sync_size < 4096) {
		dev_err(devdata->dev, "%s: hardware max data size (%u) is "
				"less than the driver minimum, unable to use "
				"the hardware device\n",
				__func__, devdata->max_sync_size);
		ret = -EINVAL;
		goto out;
	}

	nx842_pseries_constraints.maximum = devdata->max_sync_size;

	devdata->max_sync_sg = min(comp_sg_limit, decomp_sg_limit);
	if (devdata->max_sync_sg < 1) {
		dev_err(devdata->dev, "%s: hardware max sg size (%u) is "
				"less than the driver minimum, unable to use "
				"the hardware device\n",
				__func__, devdata->max_sync_sg);
		ret = -EINVAL;
		goto out;
	}

out:
	return ret;
}
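
/*
 * For the 'ibm,max-sync-cop' example in the comment above, the cells map
 * onto struct maxsynccop_t as: comp_elements = 1, comp_data_limit = 0x1000,
 * comp_sg_limit = 0x1fe, decomp_elements = 1, decomp_data_limit = 0x1000,
 * decomp_sg_limit = 0x1fe.  With those values, max_sync_size becomes 0x1000
 * and max_sync_sg becomes 0x1fe.
 */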

/**
 * nx842_OF_upd -- Handle OF properties updates for the device.
 *
 * Set all properties from the OF tree.  Optionally, a new property
 * can be provided by the @new_prop pointer to overwrite an existing value.
 * The device will remain disabled until all values are valid, this function
 * will return an error for updates unless all values are valid.
 *
 * @new_prop: If not NULL, this property is being updated.  If NULL, update
 *  all properties from the current values in the OF tree.
 *
 * Returns:
 *  0 - Success
 *  -ENOMEM - Could not allocate memory for new devdata structure
 *  -EINVAL - property value not found, new_prop is not a recognized
 *	property for the device or property value is not valid.
 *  -ENODEV - Device is not available
 */
static int nx842_OF_upd(struct property *new_prop)
{
	struct nx842_devdata *old_devdata = NULL;
	struct nx842_devdata *new_devdata = NULL;
	struct device_node *of_node = NULL;
	struct property *status = NULL;
	struct property *maxsglen = NULL;
	struct property *maxsyncop = NULL;
	int ret = 0;
	unsigned long flags;

	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
	if (!new_devdata)
		return -ENOMEM;

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	if (old_devdata)
		of_node = old_devdata->dev->of_node;

	if (!old_devdata || !of_node) {
		pr_err("%s: device is not available\n", __func__);
		spin_unlock_irqrestore(&devdata_mutex, flags);
		kfree(new_devdata);
		return -ENODEV;
	}

	memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
	new_devdata->counters = old_devdata->counters;

	/* Set ptrs for existing properties */
	status = of_find_property(of_node, "status", NULL);
	maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL);
	maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL);
	if (!status || !maxsglen || !maxsyncop) {
		dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__);
		ret = -EINVAL;
		goto error_out;
	}

	/*
	 * If this is a property update, there are only certain properties that
	 * we care about.  Bail if it isn't in the below list
	 */
	if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) ||
			 strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) ||
			 strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length)))
		goto out;

	/* Perform property updates */
	ret = nx842_OF_upd_status(new_devdata, status);
	if (ret)
		goto error_out;

	ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen);
	if (ret)
		goto error_out;

	ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop);
	if (ret)
		goto error_out;

out:
	dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n",
			__func__, new_devdata->max_sync_size,
			old_devdata->max_sync_size);
	dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n",
			__func__, new_devdata->max_sync_sg,
			old_devdata->max_sync_sg);
	dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n",
			__func__, new_devdata->max_sg_len,
			old_devdata->max_sg_len);

	rcu_assign_pointer(devdata, new_devdata);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	dev_set_drvdata(new_devdata->dev, new_devdata);
	kfree(old_devdata);
	return 0;

error_out:
	if (new_devdata) {
		dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
		nx842_OF_set_defaults(new_devdata);
		rcu_assign_pointer(devdata, new_devdata);
		spin_unlock_irqrestore(&devdata_mutex, flags);
		synchronize_rcu();
		dev_set_drvdata(new_devdata->dev, new_devdata);
		kfree(old_devdata);
	} else {
		dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
		spin_unlock_irqrestore(&devdata_mutex, flags);
	}

	if (!ret)
		ret = -EINVAL;
	return ret;
}
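
/*
 * Update discipline for devdata (as implemented above): a copy of the
 * current structure is modified under devdata_mutex, published with
 * rcu_assign_pointer(), and the old copy is freed only after
 * synchronize_rcu(), so readers in the compress/decompress paths only
 * ever see a fully initialized structure.
 */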

/**
 * nx842_OF_notifier - Process updates to OF properties for the device
 *
 * @np: notifier block
 * @action: notifier action
 * @data: struct of_reconfig_data pointer
 *
 * Returns:
 *	NOTIFY_OK on success
 *	NOTIFY_BAD encoded with error number on failure, use
 *		notifier_to_errno() to decode this value
 */
static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
			     void *data)
{
	struct of_reconfig_data *upd = data;
	struct nx842_devdata *local_devdata;
	struct device_node *node = NULL;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (local_devdata)
		node = local_devdata->dev->of_node;

	if (local_devdata &&
			action == OF_RECONFIG_UPDATE_PROPERTY &&
			!strcmp(upd->dn->name, node->name)) {
		rcu_read_unlock();
		nx842_OF_upd(upd->prop);
	} else
		rcu_read_unlock();

	return NOTIFY_OK;
}

static struct notifier_block nx842_of_nb = {
	.notifier_call = nx842_OF_notifier,
};

#define nx842_counter_read(_name)					\
static ssize_t nx842_##_name##_show(struct device *dev,		\
		struct device_attribute *attr,				\
		char *buf) {						\
	struct nx842_devdata *local_devdata;				\
	int p = 0;							\
	rcu_read_lock();						\
	local_devdata = rcu_dereference(devdata);			\
	if (local_devdata)						\
		p = snprintf(buf, PAGE_SIZE, "%lld\n",			\
		       atomic64_read(&local_devdata->counters->_name));	\
	rcu_read_unlock();						\
	return p;							\
}

#define NX842DEV_COUNTER_ATTR_RO(_name)					\
	nx842_counter_read(_name);					\
	static struct device_attribute dev_attr_##_name = __ATTR(_name,	\
						0444,			\
						nx842_##_name##_show,	\
						NULL);

NX842DEV_COUNTER_ATTR_RO(comp_complete);
NX842DEV_COUNTER_ATTR_RO(comp_failed);
NX842DEV_COUNTER_ATTR_RO(decomp_complete);
NX842DEV_COUNTER_ATTR_RO(decomp_failed);
NX842DEV_COUNTER_ATTR_RO(swdecomp);

static ssize_t nx842_timehist_show(struct device *,
		struct device_attribute *, char *);

static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444,
		nx842_timehist_show, NULL);
static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times,
		0444, nx842_timehist_show, NULL);

static ssize_t nx842_timehist_show(struct device *dev,
		struct device_attribute *attr, char *buf) {
	char *p = buf;
	struct nx842_devdata *local_devdata;
	atomic64_t *times;
	int bytes_remain = PAGE_SIZE;
	int bytes;
	int i;

	rcu_read_lock();
	local_devdata = rcu_dereference(devdata);
	if (!local_devdata) {
		rcu_read_unlock();
		return 0;
	}

	if (attr == &dev_attr_comp_times)
		times = local_devdata->counters->comp_times;
	else if (attr == &dev_attr_decomp_times)
		times = local_devdata->counters->decomp_times;
	else {
		rcu_read_unlock();
		return 0;
	}

	for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) {
		bytes = snprintf(p, bytes_remain, "%u-%uus:\t%lld\n",
			       i ? (2<<(i-1)) : 0, (2<<i)-1,
			       atomic64_read(&times[i]));
		bytes_remain -= bytes;
		p += bytes;
	}
	/* The last bucket holds everything over
	 * 2<<(NX842_HIST_SLOTS - 2) us */
	bytes = snprintf(p, bytes_remain, "%uus - :\t%lld\n",
			2<<(NX842_HIST_SLOTS - 2),
			atomic64_read(&times[(NX842_HIST_SLOTS - 1)]));
	p += bytes;

	rcu_read_unlock();
	return p - buf;
}

static struct attribute *nx842_sysfs_entries[] = {
	&dev_attr_comp_complete.attr,
	&dev_attr_comp_failed.attr,
	&dev_attr_decomp_complete.attr,
	&dev_attr_decomp_failed.attr,
	&dev_attr_swdecomp.attr,
	&dev_attr_comp_times.attr,
	&dev_attr_decomp_times.attr,
	NULL,
};

static const struct attribute_group nx842_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = nx842_sysfs_entries,
};

#define nxcop_caps_read(_name)						\
static ssize_t nxcop_##_name##_show(struct device *dev,		\
			struct device_attribute *attr, char *buf)	\
{									\
	return sprintf(buf, "%lld\n", nx_cop_caps._name);		\
}

#define NXCT_ATTR_RO(_name)						\
	nxcop_caps_read(_name);						\
	static struct device_attribute dev_attr_##_name = __ATTR(_name,	\
				0444,					\
				nxcop_##_name##_show,			\
				NULL);

NXCT_ATTR_RO(req_max_processed_len);
NXCT_ATTR_RO(min_compress_len);
NXCT_ATTR_RO(min_decompress_len);

static struct attribute *nxcop_caps_sysfs_entries[] = {
	&dev_attr_req_max_processed_len.attr,
	&dev_attr_min_compress_len.attr,
	&dev_attr_min_decompress_len.attr,
	NULL,
};

static const struct attribute_group nxcop_caps_attr_group = {
	.name	= "nx_gzip_caps",
	.attrs	= nxcop_caps_sysfs_entries,
};
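
/*
 * The counter and histogram attributes above are created directly in the
 * vio device's sysfs directory (the group has no name), while the NX
 * capability attributes sit in the "nx_gzip_caps" subdirectory.  An
 * illustrative read (the unit address below is a placeholder):
 *
 *   # cat /sys/bus/vio/devices/<unit-address>/comp_complete
 *   # cat /sys/bus/vio/devices/<unit-address>/nx_gzip_caps/min_compress_len
 */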
"842-nx", 1017 .cra_priority = 300, 1018 .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, 1019 .cra_ctxsize = sizeof(struct nx842_crypto_ctx), 1020 .cra_module = THIS_MODULE, 1021 .cra_init = nx842_pseries_crypto_init, 1022 .cra_exit = nx842_crypto_exit, 1023 .cra_u = { .compress = { 1024 .coa_compress = nx842_crypto_compress, 1025 .coa_decompress = nx842_crypto_decompress } } 1026 }; 1027 1028 static int nx842_probe(struct vio_dev *viodev, 1029 const struct vio_device_id *id) 1030 { 1031 struct nx842_devdata *old_devdata, *new_devdata = NULL; 1032 unsigned long flags; 1033 int ret = 0; 1034 1035 new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); 1036 if (!new_devdata) 1037 return -ENOMEM; 1038 1039 new_devdata->counters = kzalloc(sizeof(*new_devdata->counters), 1040 GFP_NOFS); 1041 if (!new_devdata->counters) { 1042 kfree(new_devdata); 1043 return -ENOMEM; 1044 } 1045 1046 spin_lock_irqsave(&devdata_mutex, flags); 1047 old_devdata = rcu_dereference_check(devdata, 1048 lockdep_is_held(&devdata_mutex)); 1049 1050 if (old_devdata && old_devdata->vdev != NULL) { 1051 dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__); 1052 ret = -1; 1053 goto error_unlock; 1054 } 1055 1056 dev_set_drvdata(&viodev->dev, NULL); 1057 1058 new_devdata->vdev = viodev; 1059 new_devdata->dev = &viodev->dev; 1060 nx842_OF_set_defaults(new_devdata); 1061 1062 rcu_assign_pointer(devdata, new_devdata); 1063 spin_unlock_irqrestore(&devdata_mutex, flags); 1064 synchronize_rcu(); 1065 kfree(old_devdata); 1066 1067 of_reconfig_notifier_register(&nx842_of_nb); 1068 1069 ret = nx842_OF_upd(NULL); 1070 if (ret) 1071 goto error; 1072 1073 ret = crypto_register_alg(&nx842_pseries_alg); 1074 if (ret) { 1075 dev_err(&viodev->dev, "could not register comp alg: %d\n", ret); 1076 goto error; 1077 } 1078 1079 rcu_read_lock(); 1080 dev_set_drvdata(&viodev->dev, rcu_dereference(devdata)); 1081 rcu_read_unlock(); 1082 1083 if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) { 1084 dev_err(&viodev->dev, "could not create sysfs device attributes\n"); 1085 ret = -1; 1086 goto error; 1087 } 1088 1089 if (caps_feat) { 1090 if (sysfs_create_group(&viodev->dev.kobj, 1091 &nxcop_caps_attr_group)) { 1092 dev_err(&viodev->dev, 1093 "Could not create sysfs NX capability entries\n"); 1094 ret = -1; 1095 goto error; 1096 } 1097 } 1098 1099 return 0; 1100 1101 error_unlock: 1102 spin_unlock_irqrestore(&devdata_mutex, flags); 1103 if (new_devdata) 1104 kfree(new_devdata->counters); 1105 kfree(new_devdata); 1106 error: 1107 return ret; 1108 } 1109 1110 static void nx842_remove(struct vio_dev *viodev) 1111 { 1112 struct nx842_devdata *old_devdata; 1113 unsigned long flags; 1114 1115 pr_info("Removing IBM Power 842 compression device\n"); 1116 sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group); 1117 1118 if (caps_feat) 1119 sysfs_remove_group(&viodev->dev.kobj, &nxcop_caps_attr_group); 1120 1121 crypto_unregister_alg(&nx842_pseries_alg); 1122 1123 spin_lock_irqsave(&devdata_mutex, flags); 1124 old_devdata = rcu_dereference_check(devdata, 1125 lockdep_is_held(&devdata_mutex)); 1126 of_reconfig_notifier_unregister(&nx842_of_nb); 1127 RCU_INIT_POINTER(devdata, NULL); 1128 spin_unlock_irqrestore(&devdata_mutex, flags); 1129 synchronize_rcu(); 1130 dev_set_drvdata(&viodev->dev, NULL); 1131 if (old_devdata) 1132 kfree(old_devdata->counters); 1133 kfree(old_devdata); 1134 } 1135 1136 /* 1137 * Get NX capabilities from the hypervisor. 

static int nx842_probe(struct vio_dev *viodev,
		       const struct vio_device_id *id)
{
	struct nx842_devdata *old_devdata, *new_devdata = NULL;
	unsigned long flags;
	int ret = 0;

	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
	if (!new_devdata)
		return -ENOMEM;

	new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
			GFP_NOFS);
	if (!new_devdata->counters) {
		kfree(new_devdata);
		return -ENOMEM;
	}

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));

	if (old_devdata && old_devdata->vdev != NULL) {
		dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
		ret = -1;
		goto error_unlock;
	}

	dev_set_drvdata(&viodev->dev, NULL);

	new_devdata->vdev = viodev;
	new_devdata->dev = &viodev->dev;
	nx842_OF_set_defaults(new_devdata);

	rcu_assign_pointer(devdata, new_devdata);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	kfree(old_devdata);

	of_reconfig_notifier_register(&nx842_of_nb);

	ret = nx842_OF_upd(NULL);
	if (ret)
		goto error;

	ret = crypto_register_alg(&nx842_pseries_alg);
	if (ret) {
		dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
		goto error;
	}

	rcu_read_lock();
	dev_set_drvdata(&viodev->dev, rcu_dereference(devdata));
	rcu_read_unlock();

	if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) {
		dev_err(&viodev->dev, "could not create sysfs device attributes\n");
		ret = -1;
		goto error;
	}

	if (caps_feat) {
		if (sysfs_create_group(&viodev->dev.kobj,
					&nxcop_caps_attr_group)) {
			dev_err(&viodev->dev,
				"Could not create sysfs NX capability entries\n");
			ret = -1;
			goto error;
		}
	}

	return 0;

error_unlock:
	spin_unlock_irqrestore(&devdata_mutex, flags);
	if (new_devdata)
		kfree(new_devdata->counters);
	kfree(new_devdata);
error:
	return ret;
}

static void nx842_remove(struct vio_dev *viodev)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	pr_info("Removing IBM Power 842 compression device\n");
	sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);

	if (caps_feat)
		sysfs_remove_group(&viodev->dev.kobj, &nxcop_caps_attr_group);

	crypto_unregister_alg(&nx842_pseries_alg);

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	of_reconfig_notifier_unregister(&nx842_of_nb);
	RCU_INIT_POINTER(devdata, NULL);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	dev_set_drvdata(&viodev->dev, NULL);
	if (old_devdata)
		kfree(old_devdata->counters);
	kfree(old_devdata);
}

/*
 * Get NX capabilities from the hypervisor.
 * Only NXGZIP capabilities are provided by the hypervisor right
 * now and these values are available to user space with sysfs.
 */
static void __init nxcop_get_capabilities(void)
{
	struct hv_vas_all_caps *hv_caps;
	struct hv_nx_cop_caps *hv_nxc = NULL;
	int rc;

	hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL);
	if (!hv_caps)
		return;
	/*
	 * Get NX overall capabilities with feature type=0
	 */
	rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, 0,
					  (u64)virt_to_phys(hv_caps));
	if (rc)
		goto out;

	caps_feat = be64_to_cpu(hv_caps->feat_type);
	/*
	 * NX-GZIP feature available
	 */
	if (caps_feat & VAS_NX_GZIP_FEAT_BIT) {
		hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL);
		if (!hv_nxc)
			goto out;
		/*
		 * Get capabilities for NX-GZIP feature
		 */
		rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES,
						  VAS_NX_GZIP_FEAT,
						  (u64)virt_to_phys(hv_nxc));
	} else {
		pr_err("NX-GZIP feature is not available\n");
		rc = -EINVAL;
	}

	if (!rc) {
		nx_cop_caps.descriptor = be64_to_cpu(hv_nxc->descriptor);
		nx_cop_caps.req_max_processed_len =
				be64_to_cpu(hv_nxc->req_max_processed_len);
		nx_cop_caps.min_compress_len =
				be64_to_cpu(hv_nxc->min_compress_len);
		nx_cop_caps.min_decompress_len =
				be64_to_cpu(hv_nxc->min_decompress_len);
	} else {
		caps_feat = 0;
	}

	kfree(hv_nxc);
out:
	kfree(hv_caps);
}
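
/*
 * nxcop_get_capabilities() is called once from nx842_pseries_init(), before
 * the VIO driver is registered, so caps_feat and nx_cop_caps are already
 * settled when nx842_probe() decides whether to create the nx_gzip_caps
 * sysfs group.
 */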

static const struct vio_device_id nx842_vio_driver_ids[] = {
	{"ibm,compression-v1", "ibm,compression"},
	{"", ""},
};
MODULE_DEVICE_TABLE(vio, nx842_vio_driver_ids);

static struct vio_driver nx842_vio_driver = {
	.name = KBUILD_MODNAME,
	.probe = nx842_probe,
	.remove = nx842_remove,
	.get_desired_dma = nx842_get_desired_dma,
	.id_table = nx842_vio_driver_ids,
};

static int __init nx842_pseries_init(void)
{
	struct nx842_devdata *new_devdata;
	int ret;

	if (!of_find_compatible_node(NULL, NULL, "ibm,compression"))
		return -ENODEV;

	RCU_INIT_POINTER(devdata, NULL);
	new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
	if (!new_devdata)
		return -ENOMEM;

	RCU_INIT_POINTER(devdata, new_devdata);
	/*
	 * Get NX capabilities from the hypervisor.
	 */
	nxcop_get_capabilities();

	ret = vio_register_driver(&nx842_vio_driver);
	if (ret) {
		pr_err("Could not register VIO driver %d\n", ret);

		kfree(new_devdata);
		return ret;
	}

	ret = vas_register_api_pseries(THIS_MODULE, VAS_COP_TYPE_GZIP,
				       "nx-gzip");

	if (ret)
		pr_err("NX-GZIP is not supported. Returned=%d\n", ret);

	return 0;
}

module_init(nx842_pseries_init);

static void __exit nx842_pseries_exit(void)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	vas_unregister_api_pseries();

	crypto_unregister_alg(&nx842_pseries_alg);

	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	RCU_INIT_POINTER(devdata, NULL);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	if (old_devdata && old_devdata->dev)
		dev_set_drvdata(old_devdata->dev, NULL);
	kfree(old_devdata);
	vio_unregister_driver(&nx842_vio_driver);
}

module_exit(nx842_pseries_exit);