Lines Matching defs:thr

581 struct task_struct *thr; /* thread */
604 d->thr = NULL;
623 struct task_struct *thr; /* thread */
651 d->thr = NULL;
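
For orientation: the four matches above are the thr fields of the per-worker control blocks, plus the threadfns resetting them on exit. Below is a minimal sketch of the compress-side block, reconstructed from the field accesses appearing in the matches that follow; the struct name, member order, and the CMP_SIZE macro are assumptions, not copied from the file:

/* Hypothetical reconstruction of the per-thread compress control block. */
struct cmp_data {
	struct task_struct *thr;      /* worker thread; NULL once stopped */
	struct crypto_acomp *cc;      /* compression transform (crypto_alloc_acomp) */
	struct acomp_req *cr;         /* compression request (acomp_request_alloc) */
	atomic_t ready;               /* producer -> worker: buffer published */
	atomic_t stop;                /* worker -> producer: result published */
	int ret;                      /* worker return code */
	wait_queue_head_t go;         /* wakes the worker */
	wait_queue_head_t done;       /* wakes the producer */
	size_t unc_len;               /* uncompressed length */
	size_t cmp_len;               /* compressed length */
	unsigned char unc[UNC_SIZE];  /* uncompressed buffer */
	unsigned char cmp[CMP_SIZE];  /* compressed buffer (size macro assumed) */
};
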
692 unsigned thr, run_threads, nr_threads;
732 for (thr = 0; thr < nr_threads; thr++) {
733 init_waitqueue_head(&data[thr].go);
734 init_waitqueue_head(&data[thr].done);
736 data[thr].cc = crypto_alloc_acomp(hib_comp_algo, 0, CRYPTO_ALG_ASYNC);
737 if (IS_ERR_OR_NULL(data[thr].cc)) {
738 pr_err("Could not allocate comp stream %ld\n", PTR_ERR(data[thr].cc));
743 data[thr].cr = acomp_request_alloc(data[thr].cc);
744 if (!data[thr].cr) {
750 data[thr].thr = kthread_run(compress_threadfn,
751 &data[thr],
752 "image_compress/%u", thr);
753 if (IS_ERR(data[thr].thr)) {
754 data[thr].thr = NULL;
769 for (thr = 0; thr < nr_threads; thr++) {
770 crc->unc[thr] = data[thr].unc;
771 crc->unc_len[thr] = &data[thr].unc_len;
774 crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
775 if (IS_ERR(crc->thr)) {
776 crc->thr = NULL;
797 for (thr = 0; thr < nr_threads; thr++) {
806 memcpy(data[thr].unc + off,
817 data[thr].unc_len = off;
819 atomic_set_release(&data[thr].ready, 1);
820 wake_up(&data[thr].go);
823 if (!thr)
826 crc->run_threads = thr;
830 for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
831 wait_event(data[thr].done,
832 atomic_read_acquire(&data[thr].stop));
833 atomic_set(&data[thr].stop, 0);
835 ret = data[thr].ret;
842 if (unlikely(!data[thr].cmp_len ||
843 data[thr].cmp_len >
844 bytes_worst_compress(data[thr].unc_len))) {
850 *(size_t *)data[thr].cmp = data[thr].cmp_len;
861 off < CMP_HEADER + data[thr].cmp_len;
863 memcpy(page, data[thr].cmp + off, PAGE_SIZE);
889 if (crc->thr)
890 kthread_stop(crc->thr);
894 for (thr = 0; thr < nr_threads; thr++) {
895 if (data[thr].thr)
896 kthread_stop(data[thr].thr);
897 acomp_request_free(data[thr].cr);
898 crypto_free_acomp(data[thr].cc);
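
Only the producer half of the handshake shows up in the compress matches: publish with atomic_set_release(&ready, 1) plus wake_up(&go) (819-820), then block in wait_event(done, atomic_read_acquire(&stop)) and clear stop (831-833). A sketch of the worker half, assuming the conventional kthread shape; this is a reconstruction, not the file's actual compress_threadfn:

static int compress_threadfn(void *data)
{
	struct cmp_data *d = data;

	while (1) {
		/* Sleep until the producer publishes a buffer or asks us to exit. */
		wait_event(d->go, atomic_read_acquire(&d->ready) ||
				  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;                   /* matches "d->thr = NULL" at 604 */
			atomic_set_release(&d->stop, 1); /* let a waiting producer proceed */
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		/* ... run the acomp request: d->unc/d->unc_len in,
		 *     d->cmp/d->cmp_len and d->ret out ... */

		atomic_set_release(&d->stop, 1);  /* pairs with the producer's acquire */
		wake_up(&d->done);
	}
	return 0;
}

The release/acquire pairing is what lets the producer read cmp, cmp_len, and ret without further locking once it observes stop == 1.
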
1142 struct task_struct *thr; /* thread */
1167 d->thr = NULL;
1211 unsigned i, thr, run_threads, nr_threads;
1254 for (thr = 0; thr < nr_threads; thr++) {
1255 init_waitqueue_head(&data[thr].go);
1256 init_waitqueue_head(&data[thr].done);
1258 data[thr].cc = crypto_alloc_acomp(hib_comp_algo, 0, CRYPTO_ALG_ASYNC);
1259 if (IS_ERR_OR_NULL(data[thr].cc)) {
1260 pr_err("Could not allocate comp stream %ld\n", PTR_ERR(data[thr].cc));
1265 data[thr].cr = acomp_request_alloc(data[thr].cc);
1266 if (!data[thr].cr) {
1272 data[thr].thr = kthread_run(decompress_threadfn,
1273 &data[thr],
1274 "image_decompress/%u", thr);
1275 if (IS_ERR(data[thr].thr)) {
1276 data[thr].thr = NULL;
1291 for (thr = 0; thr < nr_threads; thr++) {
1292 crc->unc[thr] = data[thr].unc;
1293 crc->unc_len[thr] = &data[thr].unc_len;
1296 crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
1297 if (IS_ERR(crc->thr)) {
1298 crc->thr = NULL;
1391 for (thr = 0; have && thr < nr_threads; thr++) {
1392 data[thr].cmp_len = *(size_t *)page[pg];
1393 if (unlikely(!data[thr].cmp_len ||
1394 data[thr].cmp_len >
1401 need = DIV_ROUND_UP(data[thr].cmp_len + CMP_HEADER,
1412 off < CMP_HEADER + data[thr].cmp_len;
1414 memcpy(data[thr].cmp + off,
1422 atomic_set_release(&data[thr].ready, 1);
1423 wake_up(&data[thr].go);
1439 for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
1440 wait_event(data[thr].done,
1441 atomic_read_acquire(&data[thr].stop));
1442 atomic_set(&data[thr].stop, 0);
1444 ret = data[thr].ret;
1451 if (unlikely(!data[thr].unc_len ||
1452 data[thr].unc_len > UNC_SIZE ||
1453 data[thr].unc_len & (PAGE_SIZE - 1))) {
1460 off < data[thr].unc_len; off += PAGE_SIZE) {
1462 data[thr].unc + off, PAGE_SIZE);
1471 crc->run_threads = thr + 1;
1479 crc->run_threads = thr;
1510 if (crc->thr)
1511 kthread_stop(crc->thr);
1515 for (thr = 0; thr < nr_threads; thr++) {
1516 if (data[thr].thr)
1517 kthread_stop(data[thr].thr);
1518 acomp_request_free(data[thr].cr);
1519 crypto_free_acomp(data[thr].cc);
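
Both exit paths (889-898 and 1510-1519) share the same teardown shape. Two details are visible in the matches: kthread_stop() is guarded by a NULL check, which works because a failed kthread_run() stores NULL back into the slot (754/1276) and an exiting worker clears d->thr itself (604/651/1167); and acomp_request_free()/crypto_free_acomp() are called unconditionally for every slot, presumably including slots whose allocation failed early. A condensed sketch of that shape, with names taken from the matches:

/* Condensed reconstruction of the common cleanup path. */
if (crc->thr)
	kthread_stop(crc->thr);              /* CRC32 worker, only if it started */
for (thr = 0; thr < nr_threads; thr++) {
	if (data[thr].thr)
		kthread_stop(data[thr].thr);     /* per-slot (de)compress worker */
	acomp_request_free(data[thr].cr);    /* called even when cr is NULL */
	crypto_free_acomp(data[thr].cc);     /* called even when cc is NULL */
}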