Lines matching full:req — identifier search results from the NFS generic page-list code (fs/nfs/pagelist.c in the Linux kernel). Each entry gives the source line number, the matching excerpt, and the containing function.

37 const struct nfs_page *req; member
42 const struct nfs_page *req) in nfs_page_iter_page_init() argument
44 i->req = req; in nfs_page_iter_page_init()
50 const struct nfs_page *req = i->req; in nfs_page_iter_page_advance() local
53 i->count = (tmp < req->wb_bytes) ? tmp : req->wb_bytes; in nfs_page_iter_page_advance()
58 const struct nfs_page *req = i->req; in nfs_page_iter_page_get() local
61 if (i->count != req->wb_bytes) { in nfs_page_iter_page_get()
62 size_t base = i->count + req->wb_pgbase; in nfs_page_iter_page_get()
65 page = nfs_page_to_page(req, base); in nfs_page_iter_page_get()
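The three iterator helpers excerpted above (nfs_page_iter_page_init/_advance/_get) step through the pages backing a single request: a running count is clamped at wb_bytes, and the current page is the one under wb_pgbase plus that count. A minimal compilable userspace model of the same walk; all names here (req_span, page_iter, iter_*) are illustrative, not the kernel's:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Simplified stand-ins for struct nfs_page and nfs_page_iter_page. */
struct req_span { size_t pgbase, bytes; };
struct page_iter { const struct req_span *req; size_t count; };

static void iter_init(struct page_iter *i, const struct req_span *req)
{
	i->req = req;
	i->count = 0;
}

/* Consume @sz bytes, clamping at the end of the request, just as
 * nfs_page_iter_page_advance() clamps at req->wb_bytes. */
static void iter_advance(struct page_iter *i, size_t sz)
{
	size_t tmp = i->count + sz;

	i->count = tmp < i->req->bytes ? tmp : i->req->bytes;
}

/* Return the page index under the current position, or -1 once the
 * whole request is consumed (i->count == wb_bytes). */
static long iter_get(const struct page_iter *i)
{
	if (i->count == i->req->bytes)
		return -1;
	return (long)((i->req->pgbase + i->count) / PAGE_SIZE);
}

int main(void)
{
	struct req_span r = { .pgbase = 1024, .bytes = 10000 };
	struct page_iter it;
	long pg;

	iter_init(&it, &r);
	while ((pg = iter_get(&it)) >= 0) {
		printf("page %ld\n", pg);
		/* Step to the next page boundary. */
		iter_advance(&it, PAGE_SIZE - (r.pgbase + it.count) % PAGE_SIZE);
	}
	return 0;
}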
102 hdr->req = nfs_list_entry(mirror->pg_list.next); in nfs_pgheader_init()
104 hdr->cred = nfs_req_openctx(hdr->req)->cred; in nfs_pgheader_init()
105 hdr->io_start = req_offset(hdr->req); in nfs_pgheader_init()
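nfs_pgheader_init() records the I/O start as req_offset() of the first queued request. A sketch of what req_offset() computes (the helper lives in include/linux/nfs_page.h; the function name suffix here marks it as a model):

#include <stdint.h>

#define PAGE_SHIFT 12

/* The absolute file position of a request is its page index scaled by
 * the page size, plus the offset within that page. */
static uint64_t req_offset_model(uint64_t wb_index, unsigned int wb_offset)
{
	return (wb_index << PAGE_SHIFT) + wb_offset;
}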
192 * @req: request that is to be locked
194 * this lock must be held when modifying req->wb_head
199 nfs_page_set_headlock(struct nfs_page *req) in nfs_page_set_headlock() argument
201 if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags)) in nfs_page_set_headlock()
204 set_bit(PG_CONTENDED1, &req->wb_flags); in nfs_page_set_headlock()
206 return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK, in nfs_page_set_headlock()
212 * @req: request that is to be locked
215 nfs_page_clear_headlock(struct nfs_page *req) in nfs_page_clear_headlock() argument
217 clear_bit_unlock(PG_HEADLOCK, &req->wb_flags); in nfs_page_clear_headlock()
219 if (!test_bit(PG_CONTENDED1, &req->wb_flags)) in nfs_page_clear_headlock()
221 wake_up_bit(&req->wb_flags, PG_HEADLOCK); in nfs_page_clear_headlock()
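The headlock excerpts above show a classic kernel bit-lock pattern: an uncontended test_and_set_bit() fast path, a contention flag (PG_CONTENDED1) set before sleeping, and an unlock that skips the wakeup when nobody ever waited. A compilable userspace model using C11 atomics, with a spin-wait standing in for the kernel's sleeping wait_on_bit_lock(); helper names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>

#define PG_HEADLOCK   0
#define PG_CONTENDED1 1

static bool test_and_set(int bit, atomic_ulong *flags)
{
	return atomic_fetch_or(flags, 1UL << bit) & (1UL << bit);
}

static void headlock_acquire(atomic_ulong *flags)
{
	/* Fast path: the uncontended test_and_set_bit() in the excerpt. */
	if (!test_and_set(PG_HEADLOCK, flags))
		return;
	/* Record contention; like the kernel, the hint is never cleared.
	 * It only exists to spare the unlocker a needless wakeup. */
	atomic_fetch_or(flags, 1UL << PG_CONTENDED1);
	/* The kernel sleeps in wait_on_bit_lock(); a spin stands in here. */
	while (test_and_set(PG_HEADLOCK, flags))
		;
}

static void headlock_release(atomic_ulong *flags)
{
	atomic_fetch_and(flags, ~(1UL << PG_HEADLOCK));
	if (!(atomic_load(flags) & (1UL << PG_CONTENDED1)))
		return;		/* no waiter ever showed up */
	/* Kernel: wake_up_bit(&req->wb_flags, PG_HEADLOCK); */
}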
226 * @req: request in group that is to be locked
234 nfs_page_group_lock(struct nfs_page *req) in nfs_page_group_lock() argument
238 ret = nfs_page_set_headlock(req); in nfs_page_group_lock()
239 if (ret || req->wb_head == req) in nfs_page_group_lock()
241 return nfs_page_set_headlock(req->wb_head); in nfs_page_group_lock()
246 * @req: request in group that is to be unlocked
249 nfs_page_group_unlock(struct nfs_page *req) in nfs_page_group_unlock() argument
251 if (req != req->wb_head) in nfs_page_group_unlock()
252 nfs_page_clear_headlock(req->wb_head); in nfs_page_group_unlock()
253 nfs_page_clear_headlock(req); in nfs_page_group_unlock()
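Per the excerpts, the group lock is two headlocks taken in a fixed order: a member locks itself first, then the group head (skipped when the member is the head, i.e. req->wb_head == req), and unlock runs in reverse. A sketch of that ordering reusing the headlock helpers from the model above; error handling from the kernel's lock path is omitted:

static void group_lock(atomic_ulong *own, atomic_ulong *head)
{
	headlock_acquire(own);
	if (head)			/* NULL when the member is the head */
		headlock_acquire(head);
}

static void group_unlock(atomic_ulong *own, atomic_ulong *head)
{
	if (head)
		headlock_release(head);
	headlock_release(own);
}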
258 * @req: request in page group
263 bool nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit) in nfs_page_group_sync_on_bit_locked() argument
265 struct nfs_page *head = req->wb_head; in nfs_page_group_sync_on_bit_locked()
269 WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags)); in nfs_page_group_sync_on_bit_locked()
271 tmp = req->wb_this_page; in nfs_page_group_sync_on_bit_locked()
272 while (tmp != req) { in nfs_page_group_sync_on_bit_locked()
279 tmp = req; in nfs_page_group_sync_on_bit_locked()
283 } while (tmp != req); in nfs_page_group_sync_on_bit_locked()
291 * @req - request in page group
294 bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit) in nfs_page_group_sync_on_bit() argument
298 nfs_page_group_lock(req); in nfs_page_group_sync_on_bit()
299 ret = nfs_page_group_sync_on_bit_locked(req, bit); in nfs_page_group_sync_on_bit()
300 nfs_page_group_unlock(req); in nfs_page_group_sync_on_bit()
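The sync-on-bit excerpts implement a rendezvous across the page group: each member sets @bit as it finishes, and only the last one to do so gets a true return, at which point the per-member bits are cleared for reuse. A compilable model, introducing a minimal ring structure (the struct name npage and its fields are illustrative; next plays the role of wb_this_page, head of wb_head):

#include <stdbool.h>

struct npage {
	struct npage *head, *next;
	unsigned long flags;
};

/* Shape of nfs_page_group_sync_on_bit_locked(): mark @req done, report
 * true only if every other ring member is done too, and in that case
 * reset the bit on the whole ring. Caller must hold the group lock. */
static bool group_sync_on_bit(struct npage *req, unsigned int bit)
{
	struct npage *tmp;

	req->flags |= 1UL << bit;
	for (tmp = req->next; tmp != req; tmp = tmp->next)
		if (!(tmp->flags & (1UL << bit)))
			return false;	/* some member not done yet */

	/* @req was the last one in: clear the bit everywhere. */
	tmp = req;
	do {
		tmp->flags &= ~(1UL << bit);
		tmp = tmp->next;
	} while (tmp != req);
	return true;
}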
306 * nfs_page_group_init - Initialize the page group linkage for @req
307 * @req - a new nfs request
308 * @prev - the previous request in page group, or NULL if @req is the first
312 nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev) in nfs_page_group_init() argument
315 WARN_ON_ONCE(prev == req); in nfs_page_group_init()
319 req->wb_head = req; in nfs_page_group_init()
320 req->wb_this_page = req; in nfs_page_group_init()
325 req->wb_head = prev->wb_head; in nfs_page_group_init()
326 req->wb_this_page = prev->wb_this_page; in nfs_page_group_init()
327 prev->wb_this_page = req; in nfs_page_group_init()
331 kref_get(&req->wb_head->wb_kref); in nfs_page_group_init()
337 inode = nfs_page_to_inode(req); in nfs_page_group_init()
338 set_bit(PG_INODE_REF, &req->wb_flags); in nfs_page_group_init()
339 kref_get(&req->wb_kref); in nfs_page_group_init()
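The group-init excerpts show how the ring is built: a first request forms a singleton ring headed by itself, and a later request is spliced in directly after @prev, inheriting prev's head. A model on struct npage from the sketch above:

#include <assert.h>
#include <stddef.h>

static void group_init(struct npage *req, struct npage *prev)
{
	assert(prev != req);	/* the WARN_ON_ONCE() in the excerpt */
	if (!prev) {
		req->head = req;
		req->next = req;
	} else {
		req->head = prev->head;
		req->next = prev->next;
		prev->next = req;
		/* The kernel additionally takes a kref on the group head
		 * here, so the head outlives every subrequest. */
	}
}

Initializing r1 alone and then splicing r2 after r1 and r3 after r2 yields the ring r1 -> r2 -> r3 -> r1, which is exactly the cycle the tmp = tmp->wb_this_page walks above traverse.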
347 * @req - request that no longer needs the page group
355 struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref); in nfs_page_group_destroy() local
356 struct nfs_page *head = req->wb_head; in nfs_page_group_destroy()
359 if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN)) in nfs_page_group_destroy()
362 tmp = req; in nfs_page_group_destroy()
370 } while (tmp != req); in nfs_page_group_destroy()
373 if (head != req) in nfs_page_group_destroy()
381 struct nfs_page *req; in nfs_page_create() local
387 req = nfs_page_alloc(); in nfs_page_create()
388 if (req == NULL) in nfs_page_create()
391 req->wb_lock_context = l_ctx; in nfs_page_create()
398 req->wb_pgbase = pgbase; in nfs_page_create()
399 req->wb_index = index; in nfs_page_create()
400 req->wb_offset = offset; in nfs_page_create()
401 req->wb_bytes = count; in nfs_page_create()
402 kref_init(&req->wb_kref); in nfs_page_create()
403 req->wb_nio = 0; in nfs_page_create()
404 return req; in nfs_page_create()
407 static void nfs_page_assign_folio(struct nfs_page *req, struct folio *folio) in nfs_page_assign_folio() argument
410 req->wb_folio = folio; in nfs_page_assign_folio()
412 set_bit(PG_FOLIO, &req->wb_flags); in nfs_page_assign_folio()
416 static void nfs_page_assign_page(struct nfs_page *req, struct page *page) in nfs_page_assign_page() argument
419 req->wb_page = page; in nfs_page_assign_page()
487 nfs_create_subreq(struct nfs_page *req, in nfs_create_subreq() argument
494 struct folio *folio = nfs_page_to_folio(req); in nfs_create_subreq()
495 struct page *page = nfs_page_to_page(req, pgbase); in nfs_create_subreq()
497 ret = nfs_page_create(req->wb_lock_context, pgbase, req->wb_index, in nfs_create_subreq()
505 for (last = req->wb_head; in nfs_create_subreq()
506 last->wb_this_page != req->wb_head; in nfs_create_subreq()
512 ret->wb_nio = req->wb_nio; in nfs_create_subreq()
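nfs_create_subreq() links the new subrequest at the tail of the group ring; the for-loop excerpted above is that tail search, modeled here on struct npage:

static struct npage *ring_tail(struct npage *head)
{
	struct npage *last = head;

	while (last->next != head)
		last = last->next;
	return last;
}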
519 * @req: pointer to request
521 void nfs_unlock_request(struct nfs_page *req) in nfs_unlock_request() argument
523 clear_bit_unlock(PG_BUSY, &req->wb_flags); in nfs_unlock_request()
525 if (!test_bit(PG_CONTENDED2, &req->wb_flags)) in nfs_unlock_request()
527 wake_up_bit(&req->wb_flags, PG_BUSY); in nfs_unlock_request()
532 * @req: pointer to request
534 void nfs_unlock_and_release_request(struct nfs_page *req) in nfs_unlock_and_release_request() argument
536 nfs_unlock_request(req); in nfs_unlock_and_release_request()
537 nfs_release_request(req); in nfs_unlock_and_release_request()
542 * @req:
547 static void nfs_clear_request(struct nfs_page *req) in nfs_clear_request() argument
549 struct folio *folio = nfs_page_to_folio(req); in nfs_clear_request()
550 struct page *page = req->wb_page; in nfs_clear_request()
551 struct nfs_lock_context *l_ctx = req->wb_lock_context; in nfs_clear_request()
556 req->wb_folio = NULL; in nfs_clear_request()
557 clear_bit(PG_FOLIO, &req->wb_flags); in nfs_clear_request()
560 req->wb_page = NULL; in nfs_clear_request()
570 req->wb_lock_context = NULL; in nfs_clear_request()
576 * @req: request to release
580 void nfs_free_request(struct nfs_page *req) in nfs_free_request() argument
582 WARN_ON_ONCE(req->wb_this_page != req); in nfs_free_request()
585 WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags)); in nfs_free_request()
586 WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags)); in nfs_free_request()
587 WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags)); in nfs_free_request()
588 WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags)); in nfs_free_request()
589 WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags)); in nfs_free_request()
592 nfs_clear_request(req); in nfs_free_request()
593 nfs_page_free(req); in nfs_free_request()
596 void nfs_release_request(struct nfs_page *req) in nfs_release_request() argument
598 kref_put(&req->wb_kref, nfs_page_group_destroy); in nfs_release_request()
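nfs_release_request() drops a reference with kref_put(), naming nfs_page_group_destroy() as the release callback; that callback in turn uses PG_TEARDOWN group synchronization (the sync-on-bit mechanism above) so the ring is freed only once the last member's reference is gone. A minimal userspace kref model (function names suffixed to mark them as sketches):

#include <stdatomic.h>

struct kref { atomic_int refcount; };

static void kref_init_model(struct kref *k)
{
	atomic_store(&k->refcount, 1);
}

static void kref_get_model(struct kref *k)
{
	atomic_fetch_add(&k->refcount, 1);
}

static void kref_put_model(struct kref *k, void (*release)(struct kref *))
{
	/* fetch_sub returns the old value: 1 means we dropped it to 0. */
	if (atomic_fetch_sub(&k->refcount, 1) == 1)
		release(k);
}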
606 * @req: this request
608 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
612 struct nfs_page *prev, struct nfs_page *req) in nfs_generic_pg_test() argument
627 if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) * in nfs_generic_pg_test()
631 return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes); in nfs_generic_pg_test()
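The size decision at the end of nfs_generic_pg_test(), modeled: grant whichever is smaller, the room left in the current batch or the whole request. (The real function first rules out overflowing the RPC page-vector limit, the shift-and-compare check excerpted above; that guard is omitted here.)

#include <stddef.h>

static size_t pg_test_size(size_t pg_bsize, size_t pg_count, size_t wb_bytes)
{
	if (pg_count > pg_bsize)
		return 0;	/* should never happen; refuse to coalesce */
	return pg_bsize - pg_count < wb_bytes
		? pg_bsize - pg_count : wb_bytes;
}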
686 struct nfs_page *req = hdr->req; in nfs_pgio_rpcsetup() local
692 hdr->args.offset = req_offset(req); in nfs_pgio_rpcsetup()
698 hdr->args.context = get_nfs_open_context(nfs_req_openctx(req)); in nfs_pgio_rpcsetup()
699 hdr->args.lock_context = req->wb_lock_context; in nfs_pgio_rpcsetup()
760 "(req %s/%llu, %u bytes @ offset %llu)\n", in nfs_initiate_pgio()
880 struct nfs_page *req; in nfs_generic_pgio() local
913 req = nfs_list_entry(head->next); in nfs_generic_pgio()
914 nfs_list_move_request(req, &hdr->pages); in nfs_generic_pgio()
916 if (req->wb_pgbase == 0) in nfs_generic_pgio()
919 nfs_page_iter_page_init(&i, req); in nfs_generic_pgio()
1008 struct nfs_page *req) in nfs_pageio_setup_mirroring() argument
1013 mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req); in nfs_pageio_setup_mirroring()
1047 const struct nfs_page *req) in nfs_page_is_contiguous() argument
1051 if (req_offset(req) != req_offset(prev) + prev->wb_bytes) in nfs_page_is_contiguous()
1053 if (req->wb_pgbase == 0) in nfs_page_is_contiguous()
1055 if (req->wb_pgbase == prev_end) { in nfs_page_is_contiguous()
1056 struct folio *folio = nfs_page_to_folio(req); in nfs_page_is_contiguous()
1059 return req->wb_page == prev->wb_page; in nfs_page_is_contiguous()
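Per the excerpts, two requests are contiguous when @req starts at the file offset where @prev ended and either opens a fresh page (wb_pgbase == 0) or continues in the very same page right where @prev stopped. A simplified compilable model (struct span and its fields are illustrative; the kernel's pgbase == 0 branch additionally verifies that @prev filled its own page to the end):

#include <stdbool.h>
#include <stdint.h>

struct span {
	uint64_t offset;	/* absolute file offset (req_offset()) */
	unsigned int pgbase;	/* start of data within the page */
	unsigned int bytes;	/* length of the request */
	const void *page;	/* backing page/folio identity */
};

static bool is_contiguous(const struct span *prev, const struct span *req)
{
	if (req->offset != prev->offset + prev->bytes)
		return false;
	if (req->pgbase == 0)
		return true;	/* simplified; see note above */
	if (req->pgbase == prev->pgbase + prev->bytes)
		return req->page == prev->page;
	return false;
}

nfs_coalesce_size(), in the next group of excerpts, layers the open-context and lock-context comparisons on top of this check and then delegates the final byte count to the layout driver's pg_test hook.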
1067 * @req: pointer to nfs_page
1070 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
1077 struct nfs_page *req, in nfs_coalesce_size() argument
1083 if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev))) in nfs_coalesce_size()
1085 flctx = locks_inode_context(d_inode(nfs_req_openctx(req)->dentry)); in nfs_coalesce_size()
1089 !nfs_match_lock_context(req->wb_lock_context, in nfs_coalesce_size()
1092 if (!nfs_page_is_contiguous(prev, req)) in nfs_coalesce_size()
1095 return pgio->pg_ops->pg_test(pgio, prev, req); in nfs_coalesce_size()
1101 * @req: request
1103 * If the request 'req' was successfully coalesced into the existing list
1104 * of pages 'desc', it returns the size of req.
1108 struct nfs_page *req) in nfs_pageio_do_add_request() argument
1116 desc->pg_ops->pg_init(desc, req); in nfs_pageio_do_add_request()
1119 mirror->pg_base = req->wb_pgbase; in nfs_pageio_do_add_request()
1125 if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) { in nfs_pageio_do_add_request()
1133 size = nfs_coalesce_size(prev, req, desc); in nfs_pageio_do_add_request()
1134 if (size < req->wb_bytes) in nfs_pageio_do_add_request()
1136 nfs_list_move_request(req, &mirror->pg_list); in nfs_pageio_do_add_request()
1137 mirror->pg_count += req->wb_bytes; in nfs_pageio_do_add_request()
1138 return req->wb_bytes; in nfs_pageio_do_add_request()
1159 struct nfs_page *req) in nfs_pageio_cleanup_request() argument
1163 nfs_list_move_request(req, &head); in nfs_pageio_cleanup_request()
1170 * @req: request
1173 * same page group. If so, it will submit @req as the last one, to ensure
1174 * the pointer to @req is still valid in case of failure.
1176 * Returns true if the request 'req' was successfully coalesced into the
1180 struct nfs_page *req) in __nfs_pageio_add_request() argument
1186 nfs_page_group_lock(req); in __nfs_pageio_add_request()
1188 subreq = req; in __nfs_pageio_add_request()
1194 if (subreq == req) in __nfs_pageio_add_request()
1196 req->wb_pgbase += size; in __nfs_pageio_add_request()
1197 req->wb_bytes -= size; in __nfs_pageio_add_request()
1198 req->wb_offset += size; in __nfs_pageio_add_request()
1199 subreq_size = req->wb_bytes; in __nfs_pageio_add_request()
1200 subreq = req; in __nfs_pageio_add_request()
1203 if (WARN_ON_ONCE(subreq != req)) { in __nfs_pageio_add_request()
1204 nfs_page_group_unlock(req); in __nfs_pageio_add_request()
1206 subreq = req; in __nfs_pageio_add_request()
1207 subreq_size = req->wb_bytes; in __nfs_pageio_add_request()
1208 nfs_page_group_lock(req); in __nfs_pageio_add_request()
1212 nfs_page_group_unlock(req); in __nfs_pageio_add_request()
1218 nfs_page_group_lock(req); in __nfs_pageio_add_request()
1221 subreq = nfs_create_subreq(req, req->wb_pgbase, in __nfs_pageio_add_request()
1222 req->wb_offset, size); in __nfs_pageio_add_request()
1228 nfs_page_group_unlock(req); in __nfs_pageio_add_request()
1232 nfs_page_group_unlock(req); in __nfs_pageio_add_request()
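The excerpts from __nfs_pageio_add_request() show the splitting loop: queue as much of the request as the coalescer will take, trim the queued part off the front (the wb_pgbase/wb_bytes/wb_offset adjustments above), and go around again. A compilable toy that mirrors only that control flow, reusing struct span from the contiguity sketch; the batch/flush types are illustrative and the kernel's subrequest creation and recoalesce policy are deliberately not modeled:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy coalescer: a batch holding at most bsize bytes. */
struct pageio { size_t bsize, count; };

/* Accept as much of @req as still fits (the role of nfs_coalesce_size). */
static size_t toy_do_add(struct pageio *d, const struct span *req)
{
	size_t room = d->bsize - d->count;
	size_t size = room < req->bytes ? room : req->bytes;

	d->count += size;
	return size;
}

/* Stand-in for submitting the batch (nfs_pageio_doio and friends). */
static bool toy_flush(struct pageio *d)
{
	printf("flush %zu bytes\n", d->count);
	d->count = 0;
	return true;
}

static bool toy_add_request(struct pageio *d, struct span *req)
{
	for (;;) {
		size_t size = toy_do_add(d, req);

		if (size == req->bytes)
			return true;	/* fully queued */
		/* Trim the queued part off the front and retry. */
		req->offset += size;
		req->pgbase += size;
		req->bytes  -= size;
		if (!toy_flush(d))
			return false;
	}
}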
1246 struct nfs_page *req; in nfs_do_recoalesce() local
1248 req = list_first_entry(&head, struct nfs_page, wb_list); in nfs_do_recoalesce()
1249 if (__nfs_pageio_add_request(desc, req)) in nfs_do_recoalesce()
1263 struct nfs_page *req) in nfs_pageio_add_request_mirror() argument
1268 ret = __nfs_pageio_add_request(desc, req); in nfs_pageio_add_request_mirror()
1295 struct nfs_page *req) in nfs_pageio_add_request() argument
1301 pgbase = req->wb_pgbase; in nfs_pageio_add_request()
1302 offset = req->wb_offset; in nfs_pageio_add_request()
1303 bytes = req->wb_bytes; in nfs_pageio_add_request()
1305 nfs_pageio_setup_mirroring(desc, req); in nfs_pageio_add_request()
1311 nfs_page_group_lock(req); in nfs_pageio_add_request()
1313 dupreq = nfs_create_subreq(req, in nfs_pageio_add_request()
1316 nfs_page_group_unlock(req); in nfs_pageio_add_request()
1328 if (!nfs_pageio_add_request_mirror(desc, req)) in nfs_pageio_add_request()
1385 struct nfs_page *req = nfs_list_entry(pages.next); in nfs_pageio_resend() local
1387 if (!nfs_pageio_add_request(desc, req)) in nfs_pageio_resend()
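The resend excerpts follow the same shape: splice the failed header's requests onto a private list and feed them back through the coalescer one by one, stopping at the first that will not queue. A sketch reusing the toy types above; the list node is likewise illustrative:

struct toy_node { struct span s; struct toy_node *next; };

static struct toy_node *toy_resend(struct pageio *d, struct toy_node *pages)
{
	while (pages) {
		if (!toy_add_request(d, &pages->s))
			break;		/* leave the remainder for cleanup */
		pages = pages->next;
	}
	return pages;			/* unsubmitted requests, if any */
}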