Searched refs:rhp (Results 1 – 25 of 34) sorted by relevance

/linux/drivers/infiniband/hw/cxgb4/
mem.c
388 return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL); in finish_mem_reg()
391 static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php, in register_mem() argument
397 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid, in register_mem()
410 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, in register_mem()
419 mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev, in alloc_pbl()
432 struct c4iw_dev *rhp; in c4iw_get_dma_mr() local
440 rhp = php->rhp; in c4iw_get_dma_mr()
458 mhp->rhp = rhp; in c4iw_get_dma_mr()
468 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid, in c4iw_get_dma_mr()
480 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, in c4iw_get_dma_mr()
[all …]
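
A note on the name in this driver: rhp here is not an rcu_head but a back-pointer to the owning struct c4iw_dev, kept in every MR, PD, QP, and CQ (mhp->rhp, php->rhp, qhp->rhp, chp->rhp) so helpers can reach the device's rdev and XArrays. A minimal userspace sketch of that convention, with the structs pared down to hypothetical stand-ins:

#include <stdio.h>

/* Pared-down, hypothetical stand-ins for the cxgb4 structs; the real
 * ones in iw_cxgb4.h carry the rdev, XArrays, and attribute blocks. */
struct c4iw_dev {
        const char *name;
};

struct c4iw_mr {
        struct c4iw_dev *rhp;   /* back-pointer to the owning device */
        unsigned int stag;
};

/* Helpers take only the object and reach the device through ->rhp,
 * as finish_mem_reg() and register_mem() do above. */
static void dump_mr(const struct c4iw_mr *mhp)
{
        printf("stag %u on %s\n", mhp->stag, mhp->rhp->name);
}

int main(void)
{
        struct c4iw_dev dev = { .name = "cxgb4-0" };
        struct c4iw_mr mr = { .rhp = &dev, .stag = 42 };

        dump_mr(&mr);
        return 0;
}
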
provider.c
65 struct c4iw_dev *rhp; in c4iw_dealloc_ucontext() local
69 rhp = to_c4iw_dev(ucontext->ibucontext.device); in c4iw_dealloc_ucontext()
73 c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx); in c4iw_dealloc_ucontext()
81 struct c4iw_dev *rhp = to_c4iw_dev(ibdev); in c4iw_alloc_ucontext() local
87 c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx); in c4iw_alloc_ucontext()
93 rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED; in c4iw_alloc_ucontext()
114 mm->addr = virt_to_phys(rhp->rdev.status_page); in c4iw_alloc_ucontext()
118 insert_flag_to_mmap(&rhp->rdev, mm, mm->addr); in c4iw_alloc_ucontext()
195 struct c4iw_dev *rhp; in c4iw_deallocate_pd() local
199 rhp = php->rhp; in c4iw_deallocate_pd()
[all …]
qp.c
717 cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]); in post_write_cmpl()
741 cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]); in post_write_cmpl()
804 PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3)); in build_tpte_memreg()
913 xa_lock_irqsave(&qhp->rhp->qps, flags); in ring_kernel_sq_db()
915 if (qhp->rhp->db_state == NORMAL) in ring_kernel_sq_db()
918 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); in ring_kernel_sq_db()
922 xa_unlock_irqrestore(&qhp->rhp->qps, flags); in ring_kernel_sq_db()
930 xa_lock_irqsave(&qhp->rhp->qps, flags); in ring_kernel_rq_db()
932 if (qhp->rhp->db_state == NORMAL) in ring_kernel_rq_db()
935 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); in ring_kernel_rq_db()
[all …]
cq.c
351 qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe)); in c4iw_flush_hw_cq()
803 c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey); in __c4iw_poll_cq_one()
844 c4iw_invalidate_mr(qhp->rhp, in __c4iw_poll_cq_one()
933 qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe)); in c4iw_poll_cq_one()
984 xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid); in c4iw_destroy_cq()
990 destroy_cq(&chp->rhp->rdev, &chp->cq, in c4iw_destroy_cq()
1004 struct c4iw_dev *rhp = to_c4iw_dev(ibcq->device); in c4iw_create_cq() local
1021 if (vector >= rhp->rdev.lldi.nciq) in c4iw_create_cq()
1057 hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size); in c4iw_create_cq()
1079 ret = create_cq(&rhp->rdev, &chp->cq, in c4iw_create_cq()
[all …]
device.c
1375 xa_lock_irq(&qp->rhp->qps); in recover_lost_dbs()
1377 ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0], in recover_lost_dbs()
1385 xa_unlock_irq(&qp->rhp->qps); in recover_lost_dbs()
1390 ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0], in recover_lost_dbs()
1399 xa_unlock_irq(&qp->rhp->qps); in recover_lost_dbs()
1404 xa_unlock_irq(&qp->rhp->qps); in recover_lost_dbs()
1407 while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) { in recover_lost_dbs()
cm.c
1645 err = c4iw_modify_qp(ep->com.qp->rhp, in process_mpa_reply()
1660 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in process_mpa_reply()
1679 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in process_mpa_reply()
1884 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in rx_data()
2760 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in peer_close()
2778 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in peer_close()
2808 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in finish_peer_abort()
2912 ret = c4iw_modify_qp(ep->com.qp->rhp, in peer_abort()
3001 c4iw_modify_qp(ep->com.qp->rhp, in close_con_rpl()
3038 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, in terminate()
[all …]
ev.c
100 c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, in post_qp_event()
/linux/kernel/rcu/
rcu_segcblist.c
28 void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp) in rcu_cblist_enqueue() argument
30 *rclp->tail = rhp; in rcu_cblist_enqueue()
31 rclp->tail = &rhp->next; in rcu_cblist_enqueue()
45 struct rcu_head *rhp) in rcu_cblist_flush_enqueue() argument
53 if (!rhp) { in rcu_cblist_flush_enqueue()
56 rhp->next = NULL; in rcu_cblist_flush_enqueue()
57 srclp->head = rhp; in rcu_cblist_flush_enqueue()
58 srclp->tail = &rhp->next; in rcu_cblist_flush_enqueue()
69 struct rcu_head *rhp; in rcu_cblist_dequeue() local
71 rhp = rclp->head; in rcu_cblist_dequeue()
[all …]
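
The two stores at lines 30-31 are the entire enqueue: rcu_cblist is a singly linked list whose tail field points at the last ->next slot, so appending is O(1) with no special case for an empty list. A self-contained userspace sketch of that idiom, using pared-down stand-ins for the real structs (which also track callback counts and segment state):

#include <stdio.h>

struct rcu_head {
        struct rcu_head *next;
        void (*func)(struct rcu_head *);
};

struct rcu_cblist {
        struct rcu_head *head;
        struct rcu_head **tail; /* address of the last ->next slot */
};

static void cblist_init(struct rcu_cblist *rclp)
{
        rclp->head = NULL;
        rclp->tail = &rclp->head;       /* empty list: tail aims at head */
}

/* O(1) append: the same two stores as lines 30-31 above. */
static void cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp)
{
        rhp->next = NULL;
        *rclp->tail = rhp;
        rclp->tail = &rhp->next;
}

/* Pop the head; re-aim tail at head when the list drains. */
static struct rcu_head *cblist_dequeue(struct rcu_cblist *rclp)
{
        struct rcu_head *rhp = rclp->head;

        if (!rhp)
                return NULL;
        rclp->head = rhp->next;
        if (!rclp->head)
                rclp->tail = &rclp->head;
        return rhp;
}

static void demo_cb(struct rcu_head *rhp)
{
        printf("invoked %p\n", (void *)rhp);
}

int main(void)
{
        struct rcu_cblist list;
        struct rcu_head a = { .func = demo_cb }, b = { .func = demo_cb };
        struct rcu_head *rhp;

        cblist_init(&list);
        cblist_enqueue(&list, &a);
        cblist_enqueue(&list, &b);
        while ((rhp = cblist_dequeue(&list)))
                rhp->func(rhp); /* the dequeue-and-invoke loop of rcu_do_batch() */
        return 0;
}
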
srcutiny.c
123 struct rcu_head *rhp; in srcu_drive_gp() local
157 rhp = lh; in srcu_drive_gp()
159 debug_rcu_head_callback(rhp); in srcu_drive_gp()
161 rhp->func(rhp); in srcu_drive_gp()
202 void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, in call_srcu() argument
207 rhp->func = func; in call_srcu()
208 rhp->next = NULL; in call_srcu()
211 *ssp->srcu_cb_tail = rhp; in call_srcu()
212 ssp->srcu_cb_tail = &rhp->next; in call_srcu()
tasks.h
342 static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, in call_rcu_tasks_generic() argument
354 rhp->next = NULL; in call_rcu_tasks_generic()
355 rhp->func = func; in call_rcu_tasks_generic()
386 rcu_segcblist_enqueue(&rtpcp->cblist, rhp); in call_rcu_tasks_generic()
405 static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp) in rcu_barrier_tasks_generic_cb() argument
410 rhp->next = rhp; // Mark the callback as having been invoked. in rcu_barrier_tasks_generic_cb()
411 rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head); in rcu_barrier_tasks_generic_cb()
543 struct rcu_head *rhp; in rcu_tasks_invoke_cbs() local
571 for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) { in rcu_tasks_invoke_cbs()
572 debug_rcu_head_callback(rhp); in rcu_tasks_invoke_cbs()
[all …]
update.c
516 void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp, in do_trace_rcu_torture_read() argument
520 trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c); in do_trace_rcu_torture_read()
524 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ argument
610 struct early_boot_kfree_rcu *rhp; in early_boot_test_call_rcu() local
617 rhp = kmalloc_obj(*rhp); in early_boot_test_call_rcu()
618 if (!WARN_ON_ONCE(!rhp)) in early_boot_test_call_rcu()
619 kfree_rcu(rhp, rh); in early_boot_test_call_rcu()
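
The kfree_rcu(rhp, rh) at line 619 is the no-callback variant of call_rcu(): given the object and the name of its embedded rcu_head field, the object is kfree()d after a grace period, with no callback function to write. A kernel-side sketch of that usage (not buildable in userspace; struct my_node is a hypothetical example):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_node {                /* hypothetical example struct */
        int key;
        struct rcu_head rh;     /* must be embedded in the object */
};

static void drop_node(struct my_node *np)
{
        /* Frees the whole object after a grace period; the second
         * argument names the embedded rcu_head field, so no callback
         * function needs to be written. */
        kfree_rcu(np, rh);
}
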
rcu_segcblist.h
24 void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp);
27 struct rcu_head *rhp);
129 struct rcu_head *rhp);
131 struct rcu_head *rhp);
srcutree.c
1322 static void srcu_leak_callback(struct rcu_head *rhp) in srcu_leak_callback() argument
1330 struct rcu_head *rhp, bool do_norm) in srcu_gp_start_if_needed() argument
1354 if (rhp) in srcu_gp_start_if_needed()
1355 rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp); in srcu_gp_start_if_needed()
1393 if (rhp) { in srcu_gp_start_if_needed()
1457 static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, in __call_srcu() argument
1460 if (debug_rcu_head_queue(rhp)) { in __call_srcu()
1462 WRITE_ONCE(rhp->func, srcu_leak_callback); in __call_srcu()
1466 rhp->func = func; in __call_srcu()
1467 (void)srcu_gp_start_if_needed(ssp, rhp, do_norm); in __call_srcu()
[all …]
rcutorture.c
1981 static void rcu_torture_timer_cb(struct rcu_head *rhp) in rcu_torture_timer_cb() argument
1983 kfree(rhp); in rcu_torture_timer_cb()
2465 struct rcu_head *rhp = kmalloc_obj(*rhp, GFP_NOWAIT); in rcu_torture_timer() local
2467 if (rhp) in rcu_torture_timer()
2468 cur_ops->call(rhp, rcu_torture_timer_cb); in rcu_torture_timer()
2867 struct rcu_head *rhp; in rcu_torture_mem_dump_obj() local
2874 rhp = kmem_cache_alloc(kcp, GFP_KERNEL); in rcu_torture_mem_dump_obj()
2875 if (WARN_ON_ONCE(!rhp)) { in rcu_torture_mem_dump_obj()
2879 …slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z); in rcu_torture_mem_dump_obj()
2884 pr_alert("mem_dump_obj(%px):", &rhp); in rcu_torture_mem_dump_obj()
[all …]
sync.c
27 static void rcu_sync_func(struct rcu_head *rhp);
57 static void rcu_sync_func(struct rcu_head *rhp) in rcu_sync_func() argument
59 struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head); in rcu_sync_func()
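
rcu_sync_func() shows the shape nearly every callback in these results shares: the callback receives only the struct rcu_head *, and container_of() subtracts the field offset to recover the enclosing object (rcu_sync here; mm_struct, pid, and irq_desc in the hits further down). A self-contained userspace sketch, with container_of() re-derived from offsetof() and the struct pared down:

#include <stdio.h>
#include <stddef.h>

/* Userspace stand-in for the kernel's container_of(): step back from
 * the member pointer by the member's offset within the struct. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head {
        struct rcu_head *next;
        void (*func)(struct rcu_head *);
};

struct rcu_sync {               /* pared down to what the hit uses */
        int gp_state;
        struct rcu_head cb_head;
};

/* Same shape as rcu_sync_func() at lines 57-59 above. */
static void rcu_sync_func(struct rcu_head *rhp)
{
        struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);

        printf("gp_state = %d\n", rsp->gp_state);
}

int main(void)
{
        struct rcu_sync rs = { .gp_state = 1 };

        rcu_sync_func(&rs.cb_head);     /* callback sees only the rcu_head */
        return 0;
}
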
tree_nocb.h
304 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
312 * Note that this function always returns true if rhp is NULL. in rcu_nocb_do_flush_bypass()
318 struct rcu_head *rhp = rhp_in; in rcu_nocb_do_flush_bypass()
323 if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) { in rcu_nocb_do_flush_bypass()
328 if (rhp) in rcu_nocb_do_flush_bypass()
337 if (lazy && rhp) { in rcu_nocb_do_flush_bypass()
338 rcu_cblist_enqueue(&rdp->nocb_bypass, rhp); in rcu_nocb_do_flush_bypass()
339 rhp = NULL; in rcu_nocb_do_flush_bypass()
341 rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp); in rcu_nocb_do_flush_bypass()
351 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
352 rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, unsigned long j, bool lazy) in rcu_nocb_flush_bypass() argument
393 rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp, bool *was_alldone, unsigned long flags, bool lazy) in rcu_nocb_try_bypass() argument
1665 rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp, unsigned long j, bool lazy) in rcu_nocb_flush_bypass() argument
[all …]
tree.c
2551 struct rcu_head *rhp; in rcu_do_batch()
2603 rhp = rcu_cblist_dequeue(&rcl); in rcu_do_batch()
2605 for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) { in rcu_do_batch()
2609 debug_rcu_head_unqueue(rhp); in rcu_do_batch()
2612 trace_rcu_invoke_callback(rcu_state.name, rhp); in rcu_do_batch()
2614 f = rhp->func; in rcu_do_batch()
2615 debug_rcu_head_callback(rhp); in rcu_do_batch()
2616 WRITE_ONCE(rhp->func, (rcu_callback_t)0L); in rcu_do_batch()
2617 f(rhp); in rcu_do_batch()
2539 struct rcu_head *rhp; in rcu_do_batch() local
3043 rcu_leak_callback(struct rcu_head *rhp) in rcu_leak_callback() argument
3726 rcu_barrier_callback(struct rcu_head *rhp) in rcu_barrier_callback() argument
[all …]
tree.h
503 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
rcuscale.c
513 static void rcu_scale_async_cb(struct rcu_head *rhp) in rcu_scale_async_cb() argument
515 struct writer_mblock *wmbp = container_of(rhp, struct writer_mblock, wmb_rh); in rcu_scale_async_cb()
/linux/drivers/media/usb/pvrusb2/
pvrusb2-v4l2.c
40 struct pvr2_ioread *rhp; member
894 if (fhp->rhp) { in pvr2_v4l2_release()
897 sp = pvr2_ioread_get_stream(fhp->rhp); in pvr2_v4l2_release()
899 pvr2_ioread_destroy(fhp->rhp); in pvr2_v4l2_release()
900 fhp->rhp = NULL; in pvr2_v4l2_release()
1025 if (fh->rhp) return 0; in pvr2_v4l2_iosetup()
1041 fh->rhp = pvr2_channel_create_mpeg_stream(fh->pdi->stream); in pvr2_v4l2_iosetup()
1042 if (!fh->rhp) { in pvr2_v4l2_iosetup()
1052 return pvr2_ioread_set_enabled(fh->rhp,!0); in pvr2_v4l2_iosetup()
1095 if (!fh->rhp) { in pvr2_v4l2_read()
[all …]
/linux/include/linux/sched/
mm.h
63 static inline void __mmdrop_delayed(struct rcu_head *rhp) in __mmdrop_delayed() argument
65 struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop); in __mmdrop_delayed()
/linux/kernel/
pid.c
104 static void delayed_put_pid(struct rcu_head *rhp) in delayed_put_pid() argument
106 struct pid *pid = container_of(rhp, struct pid, rcu); in delayed_put_pid()
/linux/kernel/irq/
irqdesc.c
462 static void delayed_free_desc(struct rcu_head *rhp) in delayed_free_desc() argument
464 struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu); in delayed_free_desc()
/linux/Documentation/RCU/
rcuref.rst
114 void el_free(struct rcu_head *rhp)
/linux/fs/xfs/
xfs_log_recover.c
2347 struct hlist_head *rhp; in xlog_recover_ophdr_to_trans() local
2350 rhp = &rhash[XLOG_RHASH(tid)]; in xlog_recover_ophdr_to_trans()
2351 hlist_for_each_entry(trans, rhp, r_list) { in xlog_recover_ophdr_to_trans()
2374 hlist_add_head(&trans->r_list, rhp); in xlog_recover_ophdr_to_trans()
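
In this last hit, rhp is no rcu_head at all: it is a hash-bucket head, where XLOG_RHASH(tid) selects the bucket, hlist_for_each_entry() scans it for a matching transaction, and a miss ends in hlist_add_head(). A self-contained userspace sketch of the same lookup-then-insert pattern, with the kernel's hlist machinery replaced by a plain singly linked bucket (all names here are hypothetical):

#include <stdio.h>
#include <stdlib.h>

#define RHASH_SIZE 16
#define RHASH(tid) ((tid) & (RHASH_SIZE - 1))  /* stand-in for XLOG_RHASH() */

struct trans {                  /* hypothetical, pared-down transaction */
        unsigned int tid;
        struct trans *next;
};

static struct trans *rhash[RHASH_SIZE];

/* Look up the transaction for tid, inserting at the bucket head on a
 * miss: the same shape as xlog_recover_ophdr_to_trans() above. */
static struct trans *lookup_trans(unsigned int tid)
{
        struct trans **rhp = &rhash[RHASH(tid)];        /* bucket head */
        struct trans *t;

        for (t = *rhp; t; t = t->next)
                if (t->tid == tid)
                        return t;

        t = calloc(1, sizeof(*t));
        if (!t)
                exit(1);
        t->tid = tid;
        t->next = *rhp;
        *rhp = t;
        return t;
}

int main(void)
{
        struct trans *a = lookup_trans(7);
        struct trans *b = lookup_trans(7);

        printf("same entry: %s\n", a == b ? "yes" : "no");
        return 0;
}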
