Lines Matching full:job
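(The function names here correspond to FreeBSD's Chelsio TOE DDP receive path, sys/dev/cxgbe/tom/t4_ddp.c.) Read in order, the matches trace the life cycle of an AIO read job: t4_aio_queue_ddp() accepts and queues the request, the requeue loop pins the user buffer with hold_aio() and arms one of two hardware DDP buffers, handle_ddp_data() and the close/TCB-reply handlers account for bytes the NIC placed directly into user memory, and t4_aio_cancel_queued()/t4_aio_cancel_active() resolve the races between cancellation and completion.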

76 * received by the AIO job so far.
82 static void t4_aio_cancel_active(struct kaiocb *job);
83 static void t4_aio_cancel_queued(struct kaiocb *job);
164 ddp_complete_one(struct kaiocb *job, int error)
169 * If this job had copied data out of the socket buffer before
173 copied = job->aio_received;
175 aio_complete(job, copied, 0);
177 aio_complete(job, -1, error);
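Lines 164-177 form a small completion helper. A reconstruction of how the fragments fit together (the condition joining the two aio_complete() calls is inferred: report a short read if any data was copied or there is no error, otherwise fail the job):

    static void
    ddp_complete_one(struct kaiocb *job, int error)
    {
        long copied;

        /*
         * If this job had copied data out of the socket buffer before
         * being terminated, report it as a short read rather than as
         * an error.
         */
        copied = job->aio_received;
        if (copied != 0 || error == 0)
            aio_complete(job, copied, 0);
        else
            aio_complete(job, -1, error);
    }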
275 if (db->job) {
282 if (!aio_clear_cancel_function(db->job))
283 ddp_complete_one(db->job, 0);
285 db->job = NULL;
356 MPASS(toep->ddp.db[i].job == NULL);
381 KASSERT(toep->ddp.db[db_idx ^ 1].job == NULL,
401 db->job = NULL;
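Lines 275-285 release a buffer's job during teardown; lines 356-401 show the invariants of the two-slot design: db[0] and db[1] are the hardware's two receive buffers, and db_idx ^ 1 names the peer slot. A sketch of the completion bookkeeping implied by the assertion at 381 (active_id and active_count are assumed names for the fields tracking which slot is oldest):

    toep->ddp.active_count--;
    if (toep->ddp.active_id == db_idx) {
        if (toep->ddp.active_count == 0) {
            /* Retiring the last active buffer: the peer slot must be idle. */
            KASSERT(toep->ddp.db[db_idx ^ 1].job == NULL,
                ("%s: active_count mismatch", __func__));
            toep->ddp.active_id = -1;
        } else
            toep->ddp.active_id ^= 1;    /* the peer slot is now oldest */
    }
    db->job = NULL;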
467 struct kaiocb *job;
505 job = db->job;
506 copied = job->aio_received;
508 if (placed > job->uaiocb.aio_nbytes - copied)
509 placed = job->uaiocb.aio_nbytes - copied;
511 job->msgrcv = 1;
515 if (!aio_clear_cancel_function(job)) {
521 job->aio_received += placed;
525 __func__, job, copied, placed);
527 aio_complete(job, copied + placed, 0);
528 } else if (aio_set_cancel_function(job, t4_aio_cancel_queued)) {
529 TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
532 aio_cancel(job);
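Lines 505-532 (and again at 1137-1157 for the connection-close path) complete a job after the hardware reports bytes placed. placed is clamped so the completion status never exceeds the request size; the job is then completed, left for an in-flight cancel to finish, or requeued. A sketch joining the fragments (the copied + placed != 0 and placed > 0 tests, and the waiting_count bookkeeping, are inferred):

    job = db->job;
    copied = job->aio_received;
    if (placed > job->uaiocb.aio_nbytes - copied)
        placed = job->uaiocb.aio_nbytes - copied;
    if (placed > 0)
        job->msgrcv = 1;
    if (!aio_clear_cancel_function(job)) {
        /* A cancel is in flight; it will complete the job. */
        job->aio_received += placed;
    } else if (copied + placed != 0) {
        CTR4(KTR_CXGBE, "%s: completing %p (copied %ld, placed %ld)",
            __func__, job, copied, placed);
        aio_complete(job, copied + placed, 0);
    } else if (aio_set_cancel_function(job, t4_aio_cancel_queued)) {
        /* Nothing placed yet; put the job back on the wait queue. */
        TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
        toep->ddp.waiting_count++;
    } else
        aio_cancel(job);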
648 struct kaiocb *job;
665 job = db->job;
674 if (aio_clear_cancel_function(job))
675 ddp_complete_one(job, ECONNRESET);
724 job->msgrcv = 1;
729 * Update the job's length but defer completion to the
732 job->aio_received += len;
734 } else if (!aio_clear_cancel_function(job)) {
739 job->aio_received += len;
741 copied = job->aio_received;
745 __func__, toep->tid, job, copied, len);
747 aio_complete(job, copied + len, 0);
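Lines 724-747 are the normal data path in handle_ddp_data(): len bytes just landed in the job's buffer. Completion is deferred in two cases, otherwise the job finishes immediately. A sketch (the cancel_pending test, its home in the per-buffer state, and the continuation of the comment at 729 are inferred):

    job->msgrcv = 1;
    if (db->cancel_pending) {
        /*
         * Update the job's length but defer completion to the
         * TCB update that acknowledges the cancel.
         */
        job->aio_received += len;
    } else if (!aio_clear_cancel_function(job)) {
        /* Record the bytes for t4_aio_cancel_active() to report. */
        job->aio_received += len;
    } else {
        copied = job->aio_received;
        CTR5(KTR_CXGBE,
            "%s: tid %u, completing %p (copied %ld, len %d)",
            __func__, toep->tid, job, copied, len);
        aio_complete(job, copied + len, 0);
    }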
1023 struct kaiocb *job;
1046 * handle_ddp_data() should leave the job around until
1050 MPASS(db->job != NULL);
1063 job = db->job;
1064 copied = job->aio_received;
1066 CTR2(KTR_CXGBE, "%s: cancelling %p", __func__, job);
1067 aio_cancel(job);
1070 __func__, job, copied);
1071 aio_complete(job, copied, 0);
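Lines 1023-1071 run when the hardware acknowledges the buffer invalidation requested by t4_aio_cancel_active(): the job is finally cancelled outright, or completed short if some data had already been placed. The fragments suggest:

    job = db->job;
    copied = job->aio_received;
    if (copied == 0) {
        CTR2(KTR_CXGBE, "%s: cancelling %p", __func__, job);
        aio_cancel(job);
    } else {
        CTR3(KTR_CXGBE, "%s: completing %p (copied %ld)",
            __func__, job, copied);
        aio_complete(job, copied, 0);
    }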
1095 struct kaiocb *job;
1137 job = db->job;
1138 copied = job->aio_received;
1140 if (placed > job->uaiocb.aio_nbytes - copied)
1141 placed = job->uaiocb.aio_nbytes - copied;
1143 job->msgrcv = 1;
1147 if (!aio_clear_cancel_function(job)) {
1153 job->aio_received += placed;
1157 aio_complete(job, copied + placed, 0);
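Lines 1137-1157 repeat the clamp-and-complete sequence sketched above for 505-532, applied to each still-active DDP slot when the connection is torn down.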
2177 hold_aio(struct toepcb *toep, struct kaiocb *job, struct pageset **pps)
2192 vm = job->userproc->p_vmspace;
2194 start = (uintptr_t)job->uaiocb.aio_buf;
2196 end = round_page(start + job->uaiocb.aio_nbytes);
2211 __func__, toep->tid, (unsigned long)job->uaiocb.aio_nbytes,
2213 job->uaiocb.aio_nbytes = end - (start + pgoff);
2225 job->uaiocb.aio_nbytes) == 0) {
2268 ps->len = job->uaiocb.aio_nbytes;
2273 CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d",
2274 __func__, toep->tid, ps, job, ps->npages);
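Lines 2177-2274 pin the user buffer so the NIC can DMA into it: the buffer is page-aligned, oversized requests are trimmed to the largest DDP buffer, a cached pageset is reused when one matches (2225), and otherwise the pages are wired. A condensed sketch; MAX_DDP_BUFFER_SIZE and the EFAULT return are assumed from context, pageset allocation is elided, and writing into ps->pages directly is a simplification:

    struct vmspace *vm;
    vm_map_t map;
    vm_offset_t start, end, pgoff;
    int n;

    vm = job->userproc->p_vmspace;
    map = &vm->vm_map;
    start = (uintptr_t)job->uaiocb.aio_buf;
    pgoff = start & PAGE_MASK;
    end = round_page(start + job->uaiocb.aio_nbytes);
    start = trunc_page(start);

    if (end - start > MAX_DDP_BUFFER_SIZE) {
        /*
         * Trim the request to the largest DDP buffer, keeping the
         * end page-aligned; the job's length shrinks to match
         * (line 2213).
         */
        end = rounddown2(start + MAX_DDP_BUFFER_SIZE, PAGE_SIZE);
        job->uaiocb.aio_nbytes = end - (start + pgoff);
    }

    /* Wire the pages for DMA; a bad buffer fails the job. */
    n = atop(end - start);
    if (vm_fault_quick_hold_pages(map, start, end - start,
        VM_PROT_WRITE, ps->pages, n) < 0)
        return (EFAULT);

    ps->npages = n;
    ps->offset = pgoff;
    ps->len = job->uaiocb.aio_nbytes;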
2282 struct kaiocb *job;
2287 job = TAILQ_FIRST(&toep->ddp.aiojobq);
2288 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
2290 if (aio_clear_cancel_function(job))
2291 ddp_complete_one(job, error);
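Lines 2282-2291 drain the wait queue, completing every queued job with the same error (used for reset and teardown). Note the polarity: only when aio_clear_cancel_function() succeeds does this path complete the job; on failure an in-flight t4_aio_cancel_queued() owns it. Reconstructed (the loop condition and waiting_count upkeep are assumed):

    static void
    ddp_complete_all(struct toepcb *toep, int error)
    {
        struct kaiocb *job;

        DDP_ASSERT_LOCKED(toep);
        while (!TAILQ_EMPTY(&toep->ddp.aiojobq)) {
            job = TAILQ_FIRST(&toep->ddp.aiojobq);
            TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
            toep->ddp.waiting_count--;
            if (aio_clear_cancel_function(job))
                ddp_complete_one(job, error);
        }
    }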
2296 aio_ddp_cancel_one(struct kaiocb *job)
2301 * If this job had copied data out of the socket buffer before
2305 copied = job->aio_received;
2307 aio_complete(job, copied, 0);
2309 aio_cancel(job);
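Lines 2296-2309 are the common tail of both cancel callbacks: a job that already copied data completes as a short read instead of being cancelled. Reconstructed (the copied != 0 test is inferred):

    static void
    aio_ddp_cancel_one(struct kaiocb *job)
    {
        long copied;

        /*
         * If this job had copied data out of the socket buffer before
         * it was cancelled, report it as a short read rather than as
         * a cancellation.
         */
        copied = job->aio_received;
        if (copied != 0)
            aio_complete(job, copied, 0);
        else
            aio_cancel(job);
    }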
2313 * Called when the main loop wants to requeue a job to retry it later.
2314 * Deals with the race of the job being cancelled while it was being
2318 aio_ddp_requeue_one(struct toepcb *toep, struct kaiocb *job)
2323 aio_set_cancel_function(job, t4_aio_cancel_queued)) {
2324 TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
2327 aio_ddp_cancel_one(job);
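Lines 2313-2327: requeueing inserts at the head, so a retried job keeps its place in line, and must re-install the queued-cancel callback; if that fails, the job was cancelled in the window and is finished via aio_ddp_cancel_one(). Reconstructed (the first half of the condition at 2323 is inferred as a DDP_DEAD guard, and waiting_count is assumed):

    static void
    aio_ddp_requeue_one(struct toepcb *toep, struct kaiocb *job)
    {

        DDP_ASSERT_LOCKED(toep);
        if (!(toep->ddp.flags & DDP_DEAD) &&
            aio_set_cancel_function(job, t4_aio_cancel_queued)) {
            TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
            toep->ddp.waiting_count++;
        } else
            aio_ddp_cancel_one(job);
    }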
2337 struct kaiocb *job;
2360 job = TAILQ_FIRST(&toep->ddp.aiojobq);
2361 so = job->fd_file->f_data;
2379 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
2380 if (!aio_clear_cancel_function(job)) {
2386 * If this job has previously copied some data, report
2390 copied = job->aio_received;
2393 aio_complete(job, copied, 0);
2399 aio_complete(job, -1, error);
2447 /* Take the next job to prep it for DDP. */
2449 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
2450 if (!aio_clear_cancel_function(job))
2452 toep->ddp.queueing = job;
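Lines 2447-2452 are the dequeue step of the main loop: a job whose cancel function cannot be cleared is already owned by a canceller and is skipped. Filled in (the waiting_count decrement and the restart label are assumptions):

    /* Take the next job to prep it for DDP. */
    toep->ddp.waiting_count--;
    TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
    if (!aio_clear_cancel_function(job))
        goto restart;    /* an in-flight cancel completes this job */
    toep->ddp.queueing = job;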
2455 error = hold_aio(toep, job, &ps);
2457 ddp_complete_one(job, error);
2464 copied = job->aio_received;
2468 aio_complete(job, copied, 0);
2477 aio_complete(job, -1, error);
2491 aio_ddp_requeue_one(toep, job);
2495 ddp_complete_one(job, 0);
2514 offset = ps->offset + job->aio_received;
2515 MPASS(job->aio_received <= job->uaiocb.aio_nbytes);
2516 resid = job->uaiocb.aio_nbytes - job->aio_received;
2550 job->aio_received += copied;
2551 job->msgrcv = 1;
2552 copied = job->aio_received;
2557 * the AIO job should keep 'sb' and 'inp' stable.
2584 aio_complete(job, copied, 0);
2597 aio_ddp_requeue_one(toep, job);
2614 aio_ddp_requeue_one(toep, job);
2626 if (toep->ddp.db[0].job == NULL) {
2629 MPASS(toep->ddp.db[1].job == NULL);
2664 * end, the AIO job holds a reference on this end of the socket
2666 * after the job is completed.
2669 job->aio_received, ps->len, ddp_flags, ddp_flags_mask);
2672 aio_ddp_requeue_one(toep, job);
2687 if (!aio_set_cancel_function(job, t4_aio_cancel_active)) {
2690 aio_ddp_cancel_one(job);
2698 toep->tid, job, db_idx, ddp_flags, ddp_flags_mask);
2704 db->job = job;
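Lines 2626-2704 arm a hardware buffer with the pinned pages. The free slot is chosen (at most one slot may be busy here), and t4_aio_cancel_active must be installed before db->job is published, since from that point cancellation has to go through hardware invalidation. A condensed sketch; the work-request construction (the TCB update) is elided and the retry handling is implied by 2672 and 2690:

    /* Pick the idle slot; the other may still be receiving. */
    if (toep->ddp.db[0].job == NULL)
        db_idx = 0;
    else {
        MPASS(toep->ddp.db[1].job == NULL);
        db_idx = 1;
    }

    /* Switch the job to the active-cancel protocol before publishing it. */
    if (!aio_set_cancel_function(job, t4_aio_cancel_active)) {
        /* Cancelled in the window; finish it and move on. */
        aio_ddp_cancel_one(job);
        toep->ddp.queueing = NULL;
        goto restart;
    }

    db = &toep->ddp.db[db_idx];
    db->job = job;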
2744 t4_aio_cancel_active(struct kaiocb *job)
2746 struct socket *so = job->fd_file->f_data;
2754 if (aio_cancel_cleared(job)) {
2756 aio_ddp_cancel_one(job);
2761 if (toep->ddp.db[i].job == job) {
2762 /* Should only ever get one cancel request for a job. */
2777 __func__, job);
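Lines 2744-2777: cancelling an active job cannot complete it immediately, because the NIC may still be writing into the buffer. Instead the buffer is invalidated with a TCB update and the job is only finished when the hardware acknowledges (the path at 1023-1071 above). A sketch; the TCB-update call is summarized in a comment rather than spelled out, and cancel_pending is assumed to live in the per-buffer state:

    static void
    t4_aio_cancel_active(struct kaiocb *job)
    {
        struct socket *so = job->fd_file->f_data;
        struct toepcb *toep = sototcpcb(so)->t_toe;
        int i;

        DDP_LOCK(toep);
        if (aio_cancel_cleared(job)) {
            /* The normal path backed off and left the job to us. */
            DDP_UNLOCK(toep);
            aio_ddp_cancel_one(job);
            return;
        }

        for (i = 0; i < nitems(toep->ddp.db); i++) {
            if (toep->ddp.db[i].job == job) {
                /* Should only ever get one cancel request for a job. */
                /*
                 * Invalidate buffer i via a TCB update
                 * (t4_set_tcb_field() on the DDP flags); the reply
                 * handler completes the job with whatever bytes were
                 * placed by then.
                 */
                toep->ddp.db[i].cancel_pending = 1;
                CTR2(KTR_CXGBE, "%s: request %p marked pending",
                    __func__, job);
                break;
            }
        }
        DDP_UNLOCK(toep);
    }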
2785 t4_aio_cancel_queued(struct kaiocb *job)
2787 struct socket *so = job->fd_file->f_data;
2792 if (!aio_cancel_cleared(job)) {
2793 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
2798 CTR2(KTR_CXGBE, "%s: request %p cancelled", __func__, job);
2801 aio_ddp_cancel_one(job);
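Both callbacks pivot on the same two-flag handshake with vfs_aio: aio_clear_cancel_function() fails if a cancel is already running (setting a CLEARED flag so the canceller knows the normal path backed off), and aio_cancel_cleared() tells the callback whether that happened. A stand-alone user-space model of the handshake, runnable as-is; the flag names mirror vfs_aio.c's jobflags, but everything else (the mutex, main()) is illustrative:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors vfs_aio.c's KAIOCB_CANCELLING/KAIOCB_CLEARED jobflags. */
    #define CANCELLING  0x01
    #define CLEARED     0x02

    struct job {
        pthread_mutex_t lock;
        int             flags;
    };

    /*
     * Normal completion path: returns true if it now owns the job, false
     * if a running cancel callback owns it (aio_clear_cancel_function()).
     */
    static bool
    clear_cancel_function(struct job *j)
    {
        bool mine;

        pthread_mutex_lock(&j->lock);
        if (j->flags & CANCELLING) {
            j->flags |= CLEARED;    /* tell the canceller we backed off */
            mine = false;
        } else
            mine = true;
        pthread_mutex_unlock(&j->lock);
        return (mine);
    }

    /* Cancel callback: did the normal path back off while we ran? */
    static bool
    cancel_cleared(struct job *j)
    {
        bool cleared;

        pthread_mutex_lock(&j->lock);
        cleared = (j->flags & CLEARED) != 0;
        pthread_mutex_unlock(&j->lock);
        return (cleared);
    }

    int
    main(void)
    {
        struct job j = { .lock = PTHREAD_MUTEX_INITIALIZER, .flags = 0 };

        /* Interleaving: a cancel begins, then the data path tries to finish. */
        j.flags |= CANCELLING;        /* vfs_aio marks the job, calls the callback */
        if (!clear_cancel_function(&j))
            printf("data path: defer; only record job->aio_received\n");
        if (cancel_cleared(&j))
            printf("cancel callback: complete the job exactly once\n");
        return (0);
    }

The point the model makes is the one the driver relies on throughout: whichever side loses the race still learns who won, so exactly one party completes each job.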
2805 t4_aio_queue_ddp(struct socket *so, struct kaiocb *job)
2812 if (job->uaiocb.aio_lio_opcode != LIO_READ)
2850 CTR3(KTR_CXGBE, "%s: queueing %p for tid %u", __func__, job, toep->tid);
2852 if (!aio_set_cancel_function(job, t4_aio_cancel_queued))
2853 panic("new job was cancelled");
2854 TAILQ_INSERT_TAIL(&toep->ddp.aiojobq, job, list);
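Lines 2805-2854, the entry point: only aio_read() requests are offloaded, a brand-new job can never already be cancelled (hence the panic), and new work goes to the queue tail, preserving FIFO order, before processing is kicked off. Condensed; the DDP-eligibility checks between 2812 and 2850 and the exact kick of the processing task are elided:

    static int
    t4_aio_queue_ddp(struct socket *so, struct kaiocb *job)
    {
        struct toepcb *toep = sototcpcb(so)->t_toe;

        /* Only reads can use DDP. */
        if (job->uaiocb.aio_lio_opcode != LIO_READ)
            return (EOPNOTSUPP);

        DDP_LOCK(toep);
        CTR3(KTR_CXGBE, "%s: queueing %p for tid %u", __func__, job,
            toep->tid);
        if (!aio_set_cancel_function(job, t4_aio_cancel_queued))
            panic("new job was cancelled");
        TAILQ_INSERT_TAIL(&toep->ddp.aiojobq, job, list);
        toep->ddp.waiting_count++;
        /* ... kick the requeue loop to start processing ... */
        DDP_UNLOCK(toep);
        return (0);
    }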