card_ddcb.c: 08e4906cc29d092ae2da0ff089efe1488e584d3c (old) -> 1451f414639465995dfc1f820aa1a64723cbd662 (new)
/**
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@gmx.net>

--- 434 unchanged lines hidden ---

                        pddcb->pre, VCRC_LENGTH(req->cmd.asv_length),
                        vcrc, vcrc_16);
                }

                ddcb_requ_set_state(req, GENWQE_REQU_FINISHED);
                queue->ddcbs_completed++;
                queue->ddcbs_in_flight--;
-               /* wake up process waiting for this DDCB */
+               /* wake up process waiting for this DDCB, and
+                  processes on the busy queue */
                wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]);
+               wake_up_interruptible(&queue->busy_waitq);

pick_next_one:
                queue->ddcb_act = (queue->ddcb_act + 1) % queue->ddcb_max;
                ddcbs_finished++;
        }

 go_home:
        spin_unlock_irqrestore(&queue->ddcb_lock, flags);

--- 279 unchanged lines hidden ---

        snprintf(d->driver_version, len, "%s", DRV_VERSION);
        d->slu_unitcfg = cd->slu_unitcfg;
        d->app_unitcfg = cd->app_unitcfg;
        return 0;
}

/**
 * __genwqe_enqueue_ddcb() - Enqueue a DDCB
 * @cd: pointer to genwqe device descriptor
 * @req: pointer to DDCB execution request
+ * @f_flags: file mode: blocking, non-blocking
 *
 * Return: 0 if enqueuing succeeded
 *         -EIO if card is unusable/PCIe problems
 *         -EBUSY if enqueuing failed
 */
-int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req)
+int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req,
+                          unsigned int f_flags)
{
        struct ddcb *pddcb;
        unsigned long flags;
        struct ddcb_queue *queue;
        struct pci_dev *pci_dev = cd->pci_dev;
        u16 icrc;

+ retry:
        if (cd->card_state != GENWQE_CARD_USED) {
                printk_ratelimited(KERN_ERR
                        "%s %s: [%s] Card is unusable/PCIe problem Req#%d\n",
                        GENWQE_DEVNAME, dev_name(&pci_dev->dev),
                        __func__, req->num);
                return -EIO;
        }

--- 9 unchanged lines hidden ---

         * It must be ensured to process all DDCBs in successive
         * order. Use a lock here in order to prevent nested DDCB
         * enqueuing.
         */
        spin_lock_irqsave(&queue->ddcb_lock, flags);

        pddcb = get_next_ddcb(cd, queue, &req->num);    /* get ptr and num */
        if (pddcb == NULL) {
+               int rc;
+
                spin_unlock_irqrestore(&queue->ddcb_lock, flags);
-               queue->busy++;
-               return -EBUSY;
+
+               if (f_flags & O_NONBLOCK) {
+                       queue->return_on_busy++;
+                       return -EBUSY;
+               }
+
+               queue->wait_on_busy++;
+               rc = wait_event_interruptible(queue->busy_waitq,
+                                             queue_free_ddcbs(queue) != 0);
+               dev_dbg(&pci_dev->dev, "[%s] waiting for free DDCB: rc=%d\n",
+                       __func__, rc);
+               if (rc == -ERESTARTSYS)
+                       return rc;  /* interrupted by a signal */
+
+               goto retry;
        }

        if (queue->ddcb_req[req->num] != NULL) {
                spin_unlock_irqrestore(&queue->ddcb_lock, flags);

                dev_err(&pci_dev->dev,
                        "[%s] picked DDCB %d with req=%p still in use!!\n",
                        __func__, req->num, req);

--- 86 unchanged lines hidden ---

        return 0;
}

/**
 * __genwqe_execute_raw_ddcb() - Setup and execute DDCB
 * @cd: pointer to genwqe device descriptor
 * @req: user provided DDCB request
+ * @f_flags: file mode: blocking, non-blocking
 */
int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
-                             struct genwqe_ddcb_cmd *cmd)
+                             struct genwqe_ddcb_cmd *cmd,
+                             unsigned int f_flags)
{
        int rc = 0;
        struct pci_dev *pci_dev = cd->pci_dev;
        struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);

        if (cmd->asiv_length > DDCB_ASIV_LENGTH) {
                dev_err(&pci_dev->dev, "[%s] err: wrong asiv_length of %d\n",
                        __func__, cmd->asiv_length);
                return -EINVAL;
        }
        if (cmd->asv_length > DDCB_ASV_LENGTH) {
                dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n",
                        __func__, cmd->asiv_length);
                return -EINVAL;
        }
-       rc = __genwqe_enqueue_ddcb(cd, req);
+       rc = __genwqe_enqueue_ddcb(cd, req, f_flags);
        if (rc != 0)
                return rc;

        rc = __genwqe_wait_ddcb(cd, req);
        if (rc < 0)             /* error or signal interrupt */
                goto err_exit;

        if (ddcb_requ_collect_debug_data(req)) {

--- 89 unchanged lines hidden ---

        if (genwqe_ddcb_max < 2)
                return -EINVAL;

        queue_size = roundup(genwqe_ddcb_max * sizeof(struct ddcb), PAGE_SIZE);

        queue->ddcbs_in_flight = 0;  /* statistics */
        queue->ddcbs_max_in_flight = 0;
        queue->ddcbs_completed = 0;
-       queue->busy = 0;
+       queue->return_on_busy = 0;
+       queue->wait_on_busy = 0;

        queue->ddcb_seq = 0x100;  /* start sequence number */
        queue->ddcb_max = genwqe_ddcb_max;  /* module parameter */
        queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size,
                                                      &queue->ddcb_daddr);
        if (queue->ddcb_vaddr == NULL) {
                dev_err(&pci_dev->dev,
                        "[%s] **err: could not allocate DDCB **\n", __func__);

--- 23 unchanged lines hidden ---

                queue->ddcb_req[i] = NULL;  /* requests */
                init_waitqueue_head(&queue->ddcb_waitqs[i]);  /* waitqueues */
        }

        queue->ddcb_act = 0;
        queue->ddcb_next = 0;  /* queue is empty */

        spin_lock_init(&queue->ddcb_lock);
-       init_waitqueue_head(&queue->ddcb_waitq);
+       init_waitqueue_head(&queue->busy_waitq);

        val64 = ((u64)(queue->ddcb_max - 1) << 8);  /* lastptr */
        __genwqe_writeq(cd, queue->IO_QUEUE_CONFIG, 0x07);  /* iCRC/vCRC */
        __genwqe_writeq(cd, queue->IO_QUEUE_SEGMENT, queue->ddcb_daddr);
        __genwqe_writeq(cd, queue->IO_QUEUE_INITSQN, queue->ddcb_seq);
        __genwqe_writeq(cd, queue->IO_QUEUE_WRAP, val64);
        return 0;

--- 231 unchanged lines hidden ---

        unsigned long flags;
        struct ddcb_queue *queue = &cd->queue;

        spin_lock_irqsave(&queue->ddcb_lock, flags);

        for (i = 0; i < queue->ddcb_max; i++)
                wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]);

+       wake_up_interruptible(&queue->busy_waitq);
        spin_unlock_irqrestore(&queue->ddcb_lock, flags);

        return 0;
}

/**
 * genwqe_finish_queue() - Remove any genwqe devices and user-interfaces
 *

--- 75 unchanged lines hidden ---
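
Note on the new f_flags parameter (not part of the hunks above): it threads the caller's open-file mode down to __genwqe_enqueue_ddcb(), so a device node opened with O_NONBLOCK keeps the old "return -EBUSY when the queue is full" behaviour, while blocking callers sleep on queue->busy_waitq until a DDCB slot frees up. A minimal caller-side sketch follows, assuming the driver's file-operations/ioctl path forwards filp->f_flags; the wrapper name is illustrative, and only __genwqe_execute_raw_ddcb() and its f_flags argument come from this diff:

/*
 * Sketch only: forward the open-file mode into the reworked execute
 * path. filp->f_flags carries O_NONBLOCK when the device node was
 * opened non-blocking; otherwise the enqueue may block on busy_waitq.
 */
static int genwqe_run_ddcb_sketch(struct genwqe_dev *cd, struct file *filp,
                                  struct genwqe_ddcb_cmd *cmd)
{
        return __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);
}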