Lines Matching full:request

292 __tape_cancel_io(struct tape_device *device, struct tape_request *request)  in __tape_cancel_io()  argument
298 if (request->callback == NULL) in __tape_cancel_io()
303 rc = ccw_device_clear(device->cdev, (long) request); in __tape_cancel_io()
307 request->status = TAPE_REQUEST_DONE; in __tape_cancel_io()
310 request->status = TAPE_REQUEST_CANCEL; in __tape_cancel_io()
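
The two status values set above are the two possible outcomes of __tape_cancel_io: a successful clear finishes the request on the spot, while a busy common-I/O layer only lets the driver mark the request for cancellation so the interrupt handler can complete it later. A minimal user-space sketch of that decision; the names (io_req, clear_channel, cancel_io) are invented for illustration and clear_channel merely stands in for ccw_device_clear:

#include <errno.h>
#include <stddef.h>

enum req_status { REQ_QUEUED, REQ_IN_IO, REQ_CANCEL, REQ_DONE };

struct io_req {
        enum req_status status;
        void (*callback)(struct io_req *, void *);
};

/* Stand-in for ccw_device_clear(): 0 = cleared, -EBUSY = channel still busy. */
static int clear_channel(struct io_req *req)
{
        (void)req;
        return 0;
}

int cancel_io(struct io_req *req)
{
        int rc;

        /* The interrupt was already processed: nothing left to cancel. */
        if (req->callback == NULL)
                return 0;

        rc = clear_channel(req);
        if (rc == 0)
                req->status = REQ_DONE;     /* cleared: finished right now */
        else if (rc == -EBUSY)
                req->status = REQ_CANCEL;   /* deferred: the IRQ path completes it */
        return rc;
}

The REQ_CANCEL case is exactly what __tape_start_next_request checks for further down, where it retries the cancel instead of starting the request.
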
434 * request. We may prevent this by returning an error.
592 struct tape_request * request; in __tape_discard_requests() local
596 request = list_entry(l, struct tape_request, list); in __tape_discard_requests()
597 if (request->status == TAPE_REQUEST_IN_IO) in __tape_discard_requests()
598 request->status = TAPE_REQUEST_DONE; in __tape_discard_requests()
599 list_del(&request->list); in __tape_discard_requests()
601 /* Decrease ref_count for removed request. */ in __tape_discard_requests()
602 request->device = NULL; in __tape_discard_requests()
604 request->rc = -EIO; in __tape_discard_requests()
605 if (request->callback != NULL) in __tape_discard_requests()
606 request->callback(request, request->callback_data); in __tape_discard_requests()
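
__tape_discard_requests is the teardown path: every request still on the queue is marked done, unlinked, stripped of its device reference and completed through its callback with -EIO. A hedged user-space analogue of that flush loop, using a plain singly linked list instead of the kernel's list_head (all names here are illustrative):

#include <errno.h>
#include <stdio.h>

enum req_status { REQ_QUEUED, REQ_IN_IO, REQ_DONE };

struct io_req {
        enum req_status status;
        int rc;
        struct io_req *next;
        void (*callback)(struct io_req *, void *);
        void *callback_data;
};

static void report(struct io_req *req, void *data)
{
        (void)data;
        printf("request %p completed with rc=%d\n", (void *)req, req->rc);
}

/* Complete every queued request with -EIO, as the driver does on removal. */
void discard_requests(struct io_req **queue)
{
        struct io_req *req;

        while ((req = *queue) != NULL) {
                *queue = req->next;     /* unlink from the queue */
                req->status = REQ_DONE; /* the driver would also drop its device reference here */
                req->rc = -EIO;
                if (req->callback != NULL)
                        req->callback(req, req->callback_data);
        }
}

int main(void)
{
        struct io_req a = { REQ_IN_IO, 0, NULL, report, NULL };
        struct io_req b = { REQ_QUEUED, 0, &a,  report, NULL };
        struct io_req *queue = &b;

        discard_requests(&queue);
        return 0;
}
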
649 * an interrupt for a request that was running. So we in tape_generic_remove()
671 * Allocate a new tape ccw request
676 struct tape_request *request; in tape_alloc_request() local
682 request = kzalloc(sizeof(struct tape_request), GFP_KERNEL); in tape_alloc_request()
683 if (request == NULL) { in tape_alloc_request()
689 request->cpaddr = kcalloc(cplength, sizeof(struct ccw1), in tape_alloc_request()
691 if (request->cpaddr == NULL) { in tape_alloc_request()
693 kfree(request); in tape_alloc_request()
699 request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA); in tape_alloc_request()
700 if (request->cpdata == NULL) { in tape_alloc_request()
702 kfree(request->cpaddr); in tape_alloc_request()
703 kfree(request); in tape_alloc_request()
707 DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr, in tape_alloc_request()
708 request->cpdata); in tape_alloc_request()
710 return request; in tape_alloc_request()
714 * Free tape ccw request
717 tape_free_request (struct tape_request * request) in tape_free_request() argument
719 DBF_LH(6, "Free request %p\n", request); in tape_free_request()
721 if (request->device) in tape_free_request()
722 tape_put_device(request->device); in tape_free_request()
723 kfree(request->cpdata); in tape_free_request()
724 kfree(request->cpaddr); in tape_free_request()
725 kfree(request); in tape_free_request()
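
tape_alloc_request builds a request in three stages, the request header, the channel-program array (cpaddr) and the data buffer (cpdata, allocated with GFP_DMA), and on failure frees whatever the earlier stages already produced; tape_free_request releases the pieces in reverse order and drops the device reference if one is held. The same staged-rollback pattern in plain C, with calloc standing in for kzalloc/kcalloc and the CCW layout reduced to a placeholder:

#include <stdlib.h>

/* Placeholder only; the real struct ccw1 has a fixed channel-command layout. */
struct ccw_stub { unsigned char cmd; unsigned short count; void *data; };

struct io_req {
        struct ccw_stub *cpaddr;        /* channel program */
        void *cpdata;                   /* data area referenced by the channel program */
};

struct io_req *alloc_request(size_t cplength, size_t datasize)
{
        struct io_req *req;

        req = calloc(1, sizeof(*req));
        if (req == NULL)
                return NULL;
        req->cpaddr = calloc(cplength, sizeof(*req->cpaddr));
        if (req->cpaddr == NULL) {
                free(req);              /* roll back stage 1 */
                return NULL;
        }
        req->cpdata = calloc(1, datasize);
        if (req->cpdata == NULL) {
                free(req->cpaddr);      /* roll back stages 1 and 2 */
                free(req);
                return NULL;
        }
        return req;
}

void free_request(struct io_req *req)
{
        if (req == NULL)
                return;
        free(req->cpdata);              /* reverse order of allocation */
        free(req->cpaddr);
        free(req);
}

Unwinding inside the allocator keeps callers free of partial-failure handling: they either get a fully built request or NULL.
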
729 __tape_start_io(struct tape_device *device, struct tape_request *request) in __tape_start_io() argument
735 request->cpaddr, in __tape_start_io()
736 (unsigned long) request, in __tape_start_io()
738 request->options in __tape_start_io()
741 request->status = TAPE_REQUEST_IN_IO; in __tape_start_io()
744 request->status = TAPE_REQUEST_QUEUED; in __tape_start_io()
748 /* Start failed. Remove request and indicate failure. */ in __tape_start_io()
749 DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc); in __tape_start_io()
758 struct tape_request *request; in __tape_start_next_request() local
763 * Try to start each request on request queue until one is in __tape_start_next_request()
767 request = list_entry(l, struct tape_request, list); in __tape_start_next_request()
773 if (request->status == TAPE_REQUEST_IN_IO) in __tape_start_next_request()
776 * Request has already been stopped. We have to wait until in __tape_start_next_request()
777 * the request is removed from the queue in the interrupt in __tape_start_next_request()
780 if (request->status == TAPE_REQUEST_DONE) in __tape_start_next_request()
784 * We wanted to cancel the request but the common I/O layer in __tape_start_next_request()
787 * Otherwise we start the next request on the queue. in __tape_start_next_request()
789 if (request->status == TAPE_REQUEST_CANCEL) { in __tape_start_next_request()
790 rc = __tape_cancel_io(device, request); in __tape_start_next_request()
792 rc = __tape_start_io(device, request); in __tape_start_next_request()
798 request->rc = rc; in __tape_start_next_request()
799 request->status = TAPE_REQUEST_DONE; in __tape_start_next_request()
801 /* Remove from request queue. */ in __tape_start_next_request()
802 list_del(&request->list); in __tape_start_next_request()
805 if (request->callback != NULL) in __tape_start_next_request()
806 request->callback(request, request->callback_data); in __tape_start_next_request()
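
__tape_start_io and __tape_start_next_request form a small scheduler: a start attempt either puts the request IN_IO, leaves it QUEUED when the subchannel is busy (to be retried later), or fails it outright, and the queue walker keeps trying entries until one is actually running. A rough single-threaded sketch of that loop with invented types; the TAPE_REQUEST_CANCEL branch and the delayed retry are only hinted at in comments:

#include <errno.h>
#include <stddef.h>

enum req_status { REQ_QUEUED, REQ_IN_IO, REQ_DONE };

struct io_req {
        enum req_status status;
        int rc;
        struct io_req *next;
};

/* Stand-in for ccw_device_start(): 0 = accepted, -EBUSY = subchannel busy. */
static int start_channel(struct io_req *req)
{
        (void)req;
        return 0;
}

int start_io(struct io_req *req)
{
        int rc = start_channel(req);

        if (rc == 0) {
                req->status = REQ_IN_IO;        /* running on the channel now */
        } else if (rc == -EBUSY) {
                req->status = REQ_QUEUED;       /* keep it queued... */
                rc = 0;                         /* ...a delayed retry would be scheduled here */
        }
        return rc;
}

/* Walk the queue until one request is actually running or the queue is empty. */
void start_next_request(struct io_req **queue)
{
        struct io_req *req;
        int rc;

        while ((req = *queue) != NULL) {
                /* Already running, or finished and waiting for its interrupt. */
                if (req->status == REQ_IN_IO || req->status == REQ_DONE)
                        return;
                rc = start_io(req);
                if (rc == 0)
                        return;
                /* Start failed: complete this request with the error, try the next. */
                req->rc = rc;
                req->status = REQ_DONE;
                *queue = req->next;
        }
}
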
825 struct tape_request *request; in tape_long_busy_timeout() local
828 request = list_entry(device->req_queue.next, struct tape_request, list); in tape_long_busy_timeout()
829 BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY); in tape_long_busy_timeout()
839 struct tape_request * request, in __tape_end_request() argument
842 DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc); in __tape_end_request()
843 if (request) { in __tape_end_request()
844 request->rc = rc; in __tape_end_request()
845 request->status = TAPE_REQUEST_DONE; in __tape_end_request()
847 /* Remove from request queue. */ in __tape_end_request()
848 list_del(&request->list); in __tape_end_request()
851 if (request->callback != NULL) in __tape_end_request()
852 request->callback(request, request->callback_data); in __tape_end_request()
855 /* Start next request. */ in __tape_end_request()
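
__tape_end_request is the common completion path: record the result, mark the request done, unlink it, run the callback, then give the queue a chance to start the next request. A compressed sketch of that completion-then-reschedule step; the queue is a bare head pointer and start_next_request is stubbed (see the fuller sketch above):

#include <stddef.h>

enum req_status { REQ_QUEUED, REQ_IN_IO, REQ_DONE };

struct io_req {
        enum req_status status;
        int rc;
        struct io_req *next;
        void (*callback)(struct io_req *, void *);
        void *callback_data;
};

/* Stub: in the fuller sketch above this walks the queue and starts an entry. */
static void start_next_request(struct io_req **queue) { (void)queue; }

/* Finish one request (assumed to sit at the queue head) and kick the queue. */
void end_request(struct io_req **queue, struct io_req *req, int rc)
{
        if (req != NULL) {
                req->rc = rc;
                req->status = REQ_DONE;
                if (*queue == req)
                        *queue = req->next;     /* unlink it */
                if (req->callback != NULL)
                        req->callback(req, req->callback_data);
        }
        start_next_request(queue);              /* completion drives the scheduler */
}
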
864 tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request, in tape_dump_sense_dbf() argument
870 if (request != NULL) in tape_dump_sense_dbf()
871 op = tape_op_verbose[request->op]; in tape_dump_sense_dbf()
885 * I/O helper function. Adds the request to the request queue
890 __tape_start_request(struct tape_device *device, struct tape_request *request) in __tape_start_request() argument
894 switch (request->op) { in __tape_start_request()
912 /* Increase use count of device for the added request. */ in __tape_start_request()
913 request->device = tape_get_device(device); in __tape_start_request()
917 rc = __tape_start_io(device, request); in __tape_start_request()
921 DBF_LH(5, "Request %p added for execution.\n", request); in __tape_start_request()
922 list_add(&request->list, &device->req_queue); in __tape_start_request()
924 DBF_LH(5, "Request %p add to queue.\n", request); in __tape_start_request()
925 request->status = TAPE_REQUEST_QUEUED; in __tape_start_request()
926 list_add_tail(&request->list, &device->req_queue); in __tape_start_request()
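
__tape_start_request decides between two cases: if the queue is empty the request is started on the channel immediately and placed at the head of the queue, otherwise it is appended in QUEUED state and picked up once earlier requests complete; either way the device gains a reference for as long as the request exists. A user-space sketch of that branch, with invented types and a plain head/tail queue instead of the kernel list:

#include <stddef.h>

enum req_status { REQ_QUEUED, REQ_IN_IO, REQ_DONE };

struct io_req {
        enum req_status status;
        struct io_req *next;
};

struct io_device {
        int refcount;                   /* one reference per outstanding request */
        struct io_req *head;
        struct io_req *tail;
};

/* Stand-in for __tape_start_io(): pretend the channel accepts the request. */
static int start_io(struct io_device *dev, struct io_req *req)
{
        (void)dev;
        req->status = REQ_IN_IO;
        return 0;
}

int start_request(struct io_device *dev, struct io_req *req)
{
        int rc;

        dev->refcount++;                        /* the request pins the device */
        req->next = NULL;

        if (dev->head == NULL) {                /* queue empty: run it right away */
                rc = start_io(dev, req);
                if (rc)
                        return rc;              /* reference is dropped when the request is freed */
                dev->head = dev->tail = req;
        } else {                                /* device busy: append and wait our turn */
                req->status = REQ_QUEUED;
                dev->tail->next = req;
                dev->tail = req;
        }
        return 0;
}
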
932 * Add the request to the request queue, try to start it if the
936 tape_do_io_async(struct tape_device *device, struct tape_request *request) in tape_do_io_async() argument
940 DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request); in tape_do_io_async()
943 /* Add request to request queue and try to start it. */ in tape_do_io_async()
944 rc = __tape_start_request(device, request); in tape_do_io_async()
951 * Add the request to the request queue, try to start it if the
955 __tape_wake_up(struct tape_request *request, void *data) in __tape_wake_up() argument
957 request->callback = NULL; in __tape_wake_up()
962 tape_do_io(struct tape_device *device, struct tape_request *request) in tape_do_io() argument
968 request->callback = __tape_wake_up; in tape_do_io()
969 request->callback_data = &device->wait_queue; in tape_do_io()
970 /* Add request to request queue and try to start it. */ in tape_do_io()
971 rc = __tape_start_request(device, request); in tape_do_io()
975 /* Request added to the queue. Wait for its completion. */ in tape_do_io()
976 wait_event(device->wait_queue, (request->callback == NULL)); in tape_do_io()
977 /* Get rc from request */ in tape_do_io()
978 return request->rc; in tape_do_io()
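
tape_do_io turns the asynchronous path into a blocking call with a small trick: the callback it installs (__tape_wake_up) clears itself and wakes the device's wait queue, and the caller sleeps until request->callback == NULL, then reads request->rc. A hedged pthread analogue of that handshake, with a condition variable playing the role of the kernel wait queue and a worker thread standing in for the interrupt handler:

#include <pthread.h>
#include <stdio.h>

struct io_req {
        int rc;
        void (*callback)(struct io_req *);
        pthread_mutex_t lock;
        pthread_cond_t done;
};

/* Completion callback: clears itself and wakes the waiter (like __tape_wake_up). */
static void wake_up_waiter(struct io_req *req)
{
        pthread_mutex_lock(&req->lock);
        req->callback = NULL;
        pthread_cond_broadcast(&req->done);
        pthread_mutex_unlock(&req->lock);
}

/* Worker thread standing in for the interrupt handler that finishes the request. */
static void *fake_irq(void *arg)
{
        struct io_req *req = arg;

        req->rc = 0;                    /* record the I/O result */
        req->callback(req);             /* complete the request */
        return NULL;
}

int do_io(struct io_req *req)
{
        pthread_t irq;

        req->callback = wake_up_waiter;         /* install the self-clearing callback */
        pthread_create(&irq, NULL, fake_irq, req);

        pthread_mutex_lock(&req->lock);
        while (req->callback != NULL)           /* wait_event(..., callback == NULL) */
                pthread_cond_wait(&req->done, &req->lock);
        pthread_mutex_unlock(&req->lock);

        pthread_join(irq, NULL);
        return req->rc;                         /* result filled in by the "interrupt" */
}

int main(void)
{
        struct io_req req = {
                .rc = -1,
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .done = PTHREAD_COND_INITIALIZER,
        };

        printf("do_io returned %d\n", do_io(&req));
        return 0;
}

tape_do_io_interruptible waits on the same self-clearing callback, which is why both wrappers test request->callback == NULL rather than a separate completion flag.
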
983 * Add the request to the request queue, try to start it if the
987 __tape_wake_up_interruptible(struct tape_request *request, void *data) in __tape_wake_up_interruptible() argument
989 request->callback = NULL; in __tape_wake_up_interruptible()
995 struct tape_request *request) in tape_do_io_interruptible() argument
1001 request->callback = __tape_wake_up_interruptible; in tape_do_io_interruptible()
1002 request->callback_data = &device->wait_queue; in tape_do_io_interruptible()
1003 rc = __tape_start_request(device, request); in tape_do_io_interruptible()
1007 /* Request added to the queue. Wait for its completion. */ in tape_do_io_interruptible()
1009 (request->callback == NULL)); in tape_do_io_interruptible()
1011 /* Request finished normally. */ in tape_do_io_interruptible()
1012 return request->rc; in tape_do_io_interruptible()
1014 /* Interrupted by a signal. We have to stop the current request. */ in tape_do_io_interruptible()
1016 rc = __tape_cancel_io(device, request); in tape_do_io_interruptible()
1023 (request->callback == NULL) in tape_do_io_interruptible()
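
tape_do_io_interruptible adds one twist to the blocking variant: when the sleep is interrupted by a signal the driver cannot simply return, because the channel program may still be running, so it first cancels the request and then waits, non-interruptibly this time, until the interrupt handler has really finished it. A control-flow sketch with stubbed wait and cancel helpers; apart from -ERESTARTSYS the names and return values are illustrative:

#include <errno.h>
#include <stddef.h>

#ifndef ERESTARTSYS
#define ERESTARTSYS 512         /* kernel-internal value, defined here for the sketch */
#endif

struct io_req {
        int rc;
        void (*callback)(struct io_req *);
};

/* Stubs standing in for wait_event_interruptible(), wait_event() and the cancel. */
static int wait_interruptible(struct io_req *req) { (void)req; return -ERESTARTSYS; }
static void wait_until_done(struct io_req *req) { req->callback = NULL; }
static int cancel_io(struct io_req *req) { (void)req; return 0; }

int do_io_interruptible(struct io_req *req)
{
        int rc;

        rc = wait_interruptible(req);
        if (rc != -ERESTARTSYS)
                return req->rc;         /* finished normally: hand back the I/O result */

        /* A signal arrived while the request was still in flight: stop it first. */
        rc = cancel_io(req);
        if (rc == 0) {
                /* Cancel accepted: wait until the IRQ path has completed the request. */
                wait_until_done(req);
                rc = -ERESTARTSYS;      /* report the interruption to the caller */
        }
        return rc;
}
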
1037 tape_cancel_io(struct tape_device *device, struct tape_request *request) in tape_cancel_io() argument
1042 rc = __tape_cancel_io(device, request); in tape_cancel_io()
1054 struct tape_request *request; in __tape_do_irq() local
1061 request = (struct tape_request *) intparm; in __tape_do_irq()
1063 DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request); in __tape_do_irq()
1067 /* FIXME: What to do with the request? */ in __tape_do_irq()
1070 DBF_LH(1, "(%08x): Request timed out\n", in __tape_do_irq()
1074 __tape_end_request(device, request, -EIO); in __tape_do_irq()
1087 * error might still apply. So we just schedule the request to be in __tape_do_irq()
1092 (request->status == TAPE_REQUEST_IN_IO)) { in __tape_do_irq()
1095 request->status = TAPE_REQUEST_QUEUED; in __tape_do_irq()
1101 if(request != NULL) in __tape_do_irq()
1102 request->rescnt = irb->scsw.cmd.count; in __tape_do_irq()
1126 * Any request that does not come back with channel end in __tape_do_irq()
1130 tape_dump_sense_dbf(device, request, irb); in __tape_do_irq()
1141 * Requests that were canceled still come back with an interrupt. in __tape_do_irq()
1142 * To detect these requests the state will be set to TAPE_REQUEST_DONE. in __tape_do_irq()
1144 if(request != NULL && request->status == TAPE_REQUEST_DONE) { in __tape_do_irq()
1145 __tape_end_request(device, request, -EIO); in __tape_do_irq()
1149 rc = device->discipline->irq(device, request, irb); in __tape_do_irq()
1151 * rc < 0 : request finished unsuccessfully. in __tape_do_irq()
1152 * rc == TAPE_IO_SUCCESS: request finished successfully. in __tape_do_irq()
1153 * rc == TAPE_IO_PENDING: request is still running. Ignore rc. in __tape_do_irq()
1154 * rc == TAPE_IO_RETRY: request finished but needs another go. in __tape_do_irq()
1155 * rc == TAPE_IO_STOP: request needs to get terminated. in __tape_do_irq()
1161 __tape_end_request(device, request, rc); in __tape_do_irq()
1170 request->status = TAPE_REQUEST_LONG_BUSY; in __tape_do_irq()
1173 rc = __tape_start_io(device, request); in __tape_do_irq()
1175 __tape_end_request(device, request, rc); in __tape_do_irq()
1178 rc = __tape_cancel_io(device, request); in __tape_do_irq()
1180 __tape_end_request(device, request, rc); in __tape_do_irq()
1185 __tape_end_request(device, request, -EIO); in __tape_do_irq()
1187 __tape_end_request(device, request, rc); in __tape_do_irq()
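
The tail of __tape_do_irq hands the interrupt to the discipline (the device-type specific layer) and then acts on its verdict, as the comment block above spells out: success and errors end the request, PENDING leaves it running, RETRY restarts the channel program, STOP cancels it, and LONG_BUSY parks the request behind a timer. A condensed dispatch sketch over those verdicts, with stub helpers and illustrative enum values:

#include <stddef.h>

enum irq_verdict { IO_SUCCESS, IO_PENDING, IO_RETRY, IO_STOP, IO_LONG_BUSY };
enum req_status { REQ_IN_IO, REQ_LONG_BUSY, REQ_DONE };

struct io_req { enum req_status status; int rc; };

/* Stubs standing in for __tape_end_request/__tape_start_io/__tape_cancel_io. */
static void end_request(struct io_req *req, int rc) { req->rc = rc; req->status = REQ_DONE; }
static int restart_io(struct io_req *req) { (void)req; return 0; }
static int cancel_io(struct io_req *req) { (void)req; return 0; }

void handle_irq_verdict(struct io_req *req, int verdict)
{
        int rc;

        switch (verdict) {
        case IO_SUCCESS:                /* finished fine: complete the request */
                end_request(req, 0);
                break;
        case IO_PENDING:                /* still running: nothing to do yet */
                break;
        case IO_LONG_BUSY:              /* park it; a timer would re-arm it later */
                req->status = REQ_LONG_BUSY;
                break;
        case IO_RETRY:                  /* run the channel program again */
                rc = restart_io(req);
                if (rc)
                        end_request(req, rc);
                break;
        case IO_STOP:                   /* terminate the request */
                rc = cancel_io(req);
                if (rc)
                        end_request(req, rc);
                break;
        default:                        /* negative rc: finished unsuccessfully */
                end_request(req, verdict);
                break;
        }
}
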