Lines Matching refs:srq (drivers/infiniband/hw/mthca/mthca_srq.c)

74 static void *get_wqe(struct mthca_srq *srq, int n)  in get_wqe()  argument
76 if (srq->is_direct) in get_wqe()
77 return srq->queue.direct.buf + (n << srq->wqe_shift); in get_wqe()
79 return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf + in get_wqe()
80 ((n << srq->wqe_shift) & (PAGE_SIZE - 1)); in get_wqe()
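The helper above resolves WQE index n to a kernel virtual address: a "direct" queue is one contiguous buffer, while otherwise the byte offset n << wqe_shift is split into a page_list index and an offset within that page. A minimal userspace sketch of the same arithmetic, assuming 4 KiB pages and 64-byte WQEs (illustrative values, not taken from the driver):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        int wqe_shift = 6;                 /* 64-byte WQEs            */
        int idx[] = { 0, 1, 63, 64 };      /* entry 64 crosses a page */

        for (int i = 0; i < 4; ++i) {
            unsigned long off  = (unsigned long)idx[i] << wqe_shift;
            unsigned long page = off >> PAGE_SHIFT;     /* page_list slot */
            unsigned long rem  = off & (PAGE_SIZE - 1); /* offset in page */
            printf("wqe %2d -> page %lu offset %4lu\n", idx[i], page, rem);
        }
        return 0;
    }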
99 struct mthca_srq *srq, in mthca_tavor_init_srq_context() argument
108 context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4)); in mthca_tavor_init_srq_context()
110 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); in mthca_tavor_init_srq_context()
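On Tavor the WQE stride is programmed as (wqe_shift - 4), i.e. the descriptor size expressed in 16-byte units, packed into wqe_base_ds. A quick standalone check of that identity:

    /* A 2^wqe_shift-byte descriptor is 2^(wqe_shift - 4) chunks of 16
     * bytes; this just prints the correspondence. */
    #include <stdio.h>

    int main(void)
    {
        for (unsigned wqe_shift = 5; wqe_shift <= 8; ++wqe_shift)
            printf("stride %3u bytes = %2u sixteen-byte units (1 << %u)\n",
                   1u << wqe_shift, 1u << (wqe_shift - 4), wqe_shift - 4);
        return 0;
    }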
120 struct mthca_srq *srq, in mthca_arbel_init_srq_context() argument
129 logsize = ilog2(srq->max); in mthca_arbel_init_srq_context()
130 context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn); in mthca_arbel_init_srq_context()
131 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); in mthca_arbel_init_srq_context()
132 context->db_index = cpu_to_be32(srq->db_index); in mthca_arbel_init_srq_context()
133 context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29); in mthca_arbel_init_srq_context()
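The Arbel context packs log2 of the queue size into the top byte of state_logsize_srqn, with the 24-bit SRQ number below it. A small host-endian sketch of that packing (pack_logsize_srqn() is a made-up helper; the driver additionally byte-swaps the result with cpu_to_be32()):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pack_logsize_srqn(unsigned logsize, uint32_t srqn)
    {
        return (uint32_t)logsize << 24 | (srqn & 0xffffff);
    }

    int main(void)
    {
        uint32_t w = pack_logsize_srqn(10, 0x123); /* 1024-entry SRQ 0x123 */
        printf("packed %#x -> logsize %u, srqn %#x\n",
               (unsigned)w, (unsigned)(w >> 24), (unsigned)(w & 0xffffff));
        return 0;
    }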
141 static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq) in mthca_free_srq_buf() argument
143 mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue, in mthca_free_srq_buf()
144 srq->is_direct, &srq->mr); in mthca_free_srq_buf()
145 kfree(srq->wrid); in mthca_free_srq_buf()
149 struct mthca_srq *srq, struct ib_udata *udata) in mthca_alloc_srq_buf() argument
159 srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL); in mthca_alloc_srq_buf()
160 if (!srq->wrid) in mthca_alloc_srq_buf()
163 err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift, in mthca_alloc_srq_buf()
165 &srq->queue, &srq->is_direct, pd, 1, &srq->mr); in mthca_alloc_srq_buf()
167 kfree(srq->wrid); in mthca_alloc_srq_buf()
176 for (i = 0; i < srq->max; ++i) { in mthca_alloc_srq_buf()
179 next = wqe = get_wqe(srq, i); in mthca_alloc_srq_buf()
181 if (i < srq->max - 1) { in mthca_alloc_srq_buf()
183 next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1); in mthca_alloc_srq_buf()
190 (void *) scatter < wqe + (1 << srq->wqe_shift); in mthca_alloc_srq_buf()
195 srq->last = get_wqe(srq, srq->max - 1); in mthca_alloc_srq_buf()
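The loop above threads every WQE onto a singly linked free list: each entry records the index of its successor (via wqe_to_link() and an nda_op next-descriptor pointer with its valid bit set), and the final entry is terminated with -1. A userspace sketch of the same structure over a plain array:

    /* Index-based free chain mirroring what wqe_to_link()/nda_op do
     * inside the real WQEs; MAX_WQE is an illustrative size. */
    #include <stdio.h>

    #define MAX_WQE 8

    int main(void)
    {
        int next_free[MAX_WQE];

        for (int i = 0; i < MAX_WQE; ++i)
            next_free[i] = (i < MAX_WQE - 1) ? i + 1 : -1;

        for (int i = 0; i != -1; i = next_free[i])
            printf("wqe %d -> next %d\n", i, next_free[i]);
        return 0;
    }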
201 struct ib_srq_attr *attr, struct mthca_srq *srq, in mthca_alloc_srq() argument
213 srq->max = attr->max_wr; in mthca_alloc_srq()
214 srq->max_gs = attr->max_sge; in mthca_alloc_srq()
215 srq->counter = 0; in mthca_alloc_srq()
218 srq->max = roundup_pow_of_two(srq->max + 1); in mthca_alloc_srq()
220 srq->max = srq->max + 1; in mthca_alloc_srq()
224 srq->max_gs * sizeof (struct mthca_data_seg))); in mthca_alloc_srq()
229 srq->wqe_shift = ilog2(ds); in mthca_alloc_srq()
231 srq->srqn = mthca_alloc(&dev->srq_table.alloc); in mthca_alloc_srq()
232 if (srq->srqn == -1) in mthca_alloc_srq()
236 err = mthca_table_get(dev, dev->srq_table.table, srq->srqn); in mthca_alloc_srq()
241 srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ, in mthca_alloc_srq()
242 srq->srqn, &srq->db); in mthca_alloc_srq()
243 if (srq->db_index < 0) { in mthca_alloc_srq()
256 err = mthca_alloc_srq_buf(dev, pd, srq, udata); in mthca_alloc_srq()
260 spin_lock_init(&srq->lock); in mthca_alloc_srq()
261 srq->refcount = 1; in mthca_alloc_srq()
262 init_waitqueue_head(&srq->wait); in mthca_alloc_srq()
263 mutex_init(&srq->mutex); in mthca_alloc_srq()
266 mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf, udata); in mthca_alloc_srq()
268 mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf, udata); in mthca_alloc_srq()
270 err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn); in mthca_alloc_srq()
278 if (mthca_array_set(&dev->srq_table.srq, in mthca_alloc_srq()
279 srq->srqn & (dev->limits.num_srqs - 1), in mthca_alloc_srq()
280 srq)) { in mthca_alloc_srq()
288 srq->first_free = 0; in mthca_alloc_srq()
289 srq->last_free = srq->max - 1; in mthca_alloc_srq()
291 attr->max_wr = srq->max - 1; in mthca_alloc_srq()
292 attr->max_sge = srq->max_gs; in mthca_alloc_srq()
297 err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn); in mthca_alloc_srq()
303 mthca_free_srq_buf(dev, srq); in mthca_alloc_srq()
310 mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index); in mthca_alloc_srq()
313 mthca_table_put(dev, dev->srq_table.table, srq->srqn); in mthca_alloc_srq()
316 mthca_free(&dev->srq_table.alloc, srq->srqn); in mthca_alloc_srq()
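The sizing logic in mthca_alloc_srq() deserves a worked example: on mem-free (Arbel) HCAs the requested WR count is rounded up to a power of two with one slot reserved (which is why max_wr is reported back as srq->max - 1), and the WQE stride is the next power of two that holds the next-segment header plus max_sge scatter entries. A sketch under assumed 16-byte segment sizes and a 64-byte minimum stride (both assumptions here, not read off the listing):

    #include <stdio.h>

    static unsigned roundup_pow_of_two(unsigned x)
    {
        unsigned r = 1;
        while (r < x)
            r <<= 1;
        return r;
    }

    int main(void)
    {
        unsigned max_wr = 1000, max_sge = 4;
        unsigned max = roundup_pow_of_two(max_wr + 1);   /* -> 1024 */
        unsigned ds  = roundup_pow_of_two(16 + max_sge * 16);
        unsigned wqe_shift = 0;

        if (ds < 64)                 /* assumed minimum descriptor size */
            ds = 64;
        while ((1u << wqe_shift) < ds)
            ++wqe_shift;             /* ilog2(ds) for a power of two    */

        printf("max %u (reported max_wr %u), ds %u, wqe_shift %u\n",
               max, max - 1, ds, wqe_shift);
        return 0;
    }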
321 static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq) in get_srq_refcount() argument
326 c = srq->refcount; in get_srq_refcount()
332 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) in mthca_free_srq() argument
343 err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn); in mthca_free_srq()
348 mthca_array_clear(&dev->srq_table.srq, in mthca_free_srq()
349 srq->srqn & (dev->limits.num_srqs - 1)); in mthca_free_srq()
350 --srq->refcount; in mthca_free_srq()
353 wait_event(srq->wait, !get_srq_refcount(dev, srq)); in mthca_free_srq()
355 if (!srq->ibsrq.uobject) { in mthca_free_srq()
356 mthca_free_srq_buf(dev, srq); in mthca_free_srq()
358 mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index); in mthca_free_srq()
361 mthca_table_put(dev, dev->srq_table.table, srq->srqn); in mthca_free_srq()
362 mthca_free(&dev->srq_table.alloc, srq->srqn); in mthca_free_srq()
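Teardown takes the SRQ out of the lookup array, drops the creator's reference, and sleeps until concurrent event handlers (which take a reference in mthca_srq_event(), further down) have dropped theirs. A userspace sketch of that drop-then-wait pattern, with POSIX threads standing in for the kernel's spinlock and wait queue:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  wait_q = PTHREAD_COND_INITIALIZER;
    static int refcount = 1;              /* creator's reference */

    static void *event_handler(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        ++refcount;                       /* like ++srq->refcount */
        pthread_mutex_unlock(&lock);

        /* ... deliver the event ... */

        pthread_mutex_lock(&lock);
        if (!--refcount)
            pthread_cond_signal(&wait_q); /* like wake_up(&srq->wait) */
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, event_handler, NULL);

        /* In the driver, mthca_array_clear() under the lock prevents
         * new handlers from finding the SRQ before this point. */
        pthread_mutex_lock(&lock);
        --refcount;                       /* drop the creator's ref */
        while (refcount)                  /* like wait_event(...)   */
            pthread_cond_wait(&wait_q, &lock);
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        puts("all references dropped; safe to free");
        return 0;
    }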
370 struct mthca_srq *srq = to_msrq(ibsrq); in mthca_modify_srq() local
378 u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max; in mthca_modify_srq()
382 mutex_lock(&srq->mutex); in mthca_modify_srq()
383 ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit); in mthca_modify_srq()
384 mutex_unlock(&srq->mutex); in mthca_modify_srq()
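mthca_modify_srq() only supports arming the limit; the queue cannot be resized. From userspace this path is typically reached through libibverbs, roughly as below (error handling trimmed; srq is assumed to be a previously created struct ibv_srq):

    #include <infiniband/verbs.h>

    static int arm_srq_limit(struct ibv_srq *srq, uint32_t limit)
    {
        struct ibv_srq_attr attr = {
            .srq_limit = limit,  /* deliver IBV_EVENT_SRQ_LIMIT_REACHED */
        };                       /* once fewer than 'limit' WQEs remain */

        return ibv_modify_srq(srq, &attr, IBV_SRQ_LIMIT);
    }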
393 struct mthca_srq *srq = to_msrq(ibsrq); in mthca_query_srq() local
403 err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox); in mthca_query_srq()
415 srq_attr->max_wr = srq->max - 1; in mthca_query_srq()
416 srq_attr->max_sge = srq->max_gs; in mthca_query_srq()
427 struct mthca_srq *srq; in mthca_srq_event() local
431 srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1)); in mthca_srq_event()
432 if (srq) in mthca_srq_event()
433 ++srq->refcount; in mthca_srq_event()
436 if (!srq) { in mthca_srq_event()
441 if (!srq->ibsrq.event_handler) in mthca_srq_event()
446 event.element.srq = &srq->ibsrq; in mthca_srq_event()
447 srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); in mthca_srq_event()
451 if (!--srq->refcount) in mthca_srq_event()
452 wake_up(&srq->wait); in mthca_srq_event()
459 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr) in mthca_free_srq_wqe() argument
464 ind = wqe_addr >> srq->wqe_shift; in mthca_free_srq_wqe()
466 spin_lock(&srq->lock); in mthca_free_srq_wqe()
468 last_free = get_wqe(srq, srq->last_free); in mthca_free_srq_wqe()
470 last_free->nda_op = htonl((ind << srq->wqe_shift) | 1); in mthca_free_srq_wqe()
471 *wqe_to_link(get_wqe(srq, ind)) = -1; in mthca_free_srq_wqe()
472 srq->last_free = ind; in mthca_free_srq_wqe()
474 spin_unlock(&srq->lock); in mthca_free_srq_wqe()
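When a receive completes, the consumed WQE is appended to the tail of the free list under srq->lock: the old tail is aimed at it, and it becomes the new terminated tail. Continuing the array-based sketch from above, assuming a 4-entry queue with entries 0 and 1 posted and 2 -> 3 currently free:

    #include <stdio.h>

    static int next_free[4] = { -1, -1, 3, -1 };
    static int first_free = 2, last_free = 3;

    static void free_wqe(int ind)
    {
        next_free[last_free] = ind;  /* old tail now links to it     */
        next_free[ind] = -1;         /* freed entry becomes the tail */
        last_free = ind;
    }

    int main(void)
    {
        free_wqe(0);                 /* entry 0 completes */
        for (int i = first_free; i != -1; i = next_free[i])
            printf("free wqe %d\n", i);   /* prints 2, 3, 0 */
        return 0;
    }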
481 struct mthca_srq *srq = to_msrq(ibsrq); in mthca_tavor_post_srq_recv() local
492 spin_lock_irqsave(&srq->lock, flags); in mthca_tavor_post_srq_recv()
494 first_ind = srq->first_free; in mthca_tavor_post_srq_recv()
497 ind = srq->first_free; in mthca_tavor_post_srq_recv()
498 wqe = get_wqe(srq, ind); in mthca_tavor_post_srq_recv()
502 mthca_err(dev, "SRQ %06x full\n", srq->srqn); in mthca_tavor_post_srq_recv()
508 prev_wqe = srq->last; in mthca_tavor_post_srq_recv()
509 srq->last = wqe; in mthca_tavor_post_srq_recv()
516 if (unlikely(wr->num_sge > srq->max_gs)) { in mthca_tavor_post_srq_recv()
519 srq->last = prev_wqe; in mthca_tavor_post_srq_recv()
528 if (i < srq->max_gs) in mthca_tavor_post_srq_recv()
534 srq->wrid[ind] = wr->wr_id; in mthca_tavor_post_srq_recv()
535 srq->first_free = next_ind; in mthca_tavor_post_srq_recv()
547 mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8, in mthca_tavor_post_srq_recv()
551 first_ind = srq->first_free; in mthca_tavor_post_srq_recv()
562 mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq, in mthca_tavor_post_srq_recv()
573 spin_unlock_irqrestore(&srq->lock, flags); in mthca_tavor_post_srq_recv()
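On Tavor, posted WQEs are chained in memory and the receive doorbell is rung once per batch rather than per WQE; the listing shows the mid-loop ring (line 547) and the final flush (line 562). A sketch of that batching, with the batch size 256 assumed for MTHCA_TAVOR_MAX_WQES_PER_RECV_DB and a printf standing in for the MMIO doorbell write:

    #include <stdio.h>

    #define MAX_WQES_PER_DB 256      /* assumed batch limit */

    static void ring_doorbell(unsigned first_ind, unsigned nreq)
    {
        printf("doorbell: first wqe %u, %u new wqes\n", first_ind, nreq);
    }

    int main(void)
    {
        unsigned nreq = 0, first_ind = 0, ind = 0, total = 600;

        for (unsigned i = 0; i < total; ++i) {
            ++nreq; ++ind;                   /* build and link one WQE */
            if (nreq == MAX_WQES_PER_DB) {
                ring_doorbell(first_ind, nreq);
                nreq = 0;
                first_ind = ind;             /* next batch starts here */
            }
        }
        if (nreq)                            /* flush the partial batch */
            ring_doorbell(first_ind, nreq);
        return 0;
    }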
581 struct mthca_srq *srq = to_msrq(ibsrq); in mthca_arbel_post_srq_recv() local
590 spin_lock_irqsave(&srq->lock, flags); in mthca_arbel_post_srq_recv()
593 ind = srq->first_free; in mthca_arbel_post_srq_recv()
594 wqe = get_wqe(srq, ind); in mthca_arbel_post_srq_recv()
598 mthca_err(dev, "SRQ %06x full\n", srq->srqn); in mthca_arbel_post_srq_recv()
609 if (unlikely(wr->num_sge > srq->max_gs)) { in mthca_arbel_post_srq_recv()
620 if (i < srq->max_gs) in mthca_arbel_post_srq_recv()
623 srq->wrid[ind] = wr->wr_id; in mthca_arbel_post_srq_recv()
624 srq->first_free = next_ind; in mthca_arbel_post_srq_recv()
628 srq->counter += nreq; in mthca_arbel_post_srq_recv()
635 *srq->db = cpu_to_be32(srq->counter); in mthca_arbel_post_srq_recv()
638 spin_unlock_irqrestore(&srq->lock, flags); in mthca_arbel_post_srq_recv()
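Mem-free (Arbel) hardware has no per-post doorbell register for SRQs: the driver bumps a software counter and, after a write barrier, stores it (byte-swapped, per line 635) into a doorbell record in coherent memory, so the HCA can never observe the new counter before the WQEs themselves. A sketch of that ordering with C11 release/acquire atomics standing in for wmb():

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t wqe_ring[16];        /* stand-in for the WQE queue */
    static _Atomic uint32_t db_rec;      /* doorbell record in memory  */
    static uint32_t counter;

    static void post_recv(uint32_t payload, unsigned nreq)
    {
        wqe_ring[counter % 16] = payload;    /* write the WQE first     */
        counter += nreq;
        atomic_store_explicit(&db_rec,       /* release = barrier+store */
                              counter, memory_order_release);
    }

    int main(void)
    {
        post_recv(0xabcd, 1);
        printf("doorbell record now %u\n",
               atomic_load_explicit(&db_rec, memory_order_acquire));
        return 0;
    }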
683 err = mthca_array_init(&dev->srq_table.srq, in mthca_init_srq_table()
696 mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs); in mthca_cleanup_srq_table()
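Both the event-path lookup (line 431) and the table entries it searches rely on num_srqs being a power of two, so srqn & (num_srqs - 1) is a cheap modulo into the array. For illustration, with assumed values:

    #include <stdio.h>

    int main(void)
    {
        unsigned num_srqs = 1u << 10;      /* must be a power of two */
        unsigned srqn = 0x80123;           /* hardware SRQ number    */
        printf("srqn %#x -> table slot %u\n", srqn, srqn & (num_srqs - 1));
        return 0;
    }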