// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efct_driver.h"
#include "efct_hw.h"
#include "efct_unsol.h"

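/*
 * Create the HW queue topology: one EQ per configured interrupt vector, a
 * single MQ (with its own CQ) on the first EQ, one WQ (with its own CQ) per
 * EQ, and a CQ set plus RQ set spanning all EQs for multi-RQ (MRQ) receive.
 * Any partially created queues are torn down on failure.
 */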
int
efct_hw_init_queues(struct efct_hw *hw)
{
	struct hw_eq *eq = NULL;
	struct hw_cq *cq = NULL;
	struct hw_wq *wq = NULL;
	struct hw_mq *mq = NULL;

	struct hw_eq *eqs[EFCT_HW_MAX_NUM_EQ];
	struct hw_cq *cqs[EFCT_HW_MAX_NUM_EQ];
	struct hw_rq *rqs[EFCT_HW_MAX_NUM_EQ];
	u32 i = 0, j;

	hw->eq_count = 0;
	hw->cq_count = 0;
	hw->mq_count = 0;
	hw->wq_count = 0;
	hw->rq_count = 0;
	hw->hw_rq_count = 0;
	INIT_LIST_HEAD(&hw->eq_list);

	for (i = 0; i < hw->config.n_eq; i++) {
		/* Create EQ */
		eq = efct_hw_new_eq(hw, EFCT_HW_EQ_DEPTH);
		if (!eq) {
			efct_hw_queue_teardown(hw);
			return -ENOMEM;
		}

		eqs[i] = eq;

		/* Create one MQ (and its CQ) on the first EQ only */
		if (!i) {
			cq = efct_hw_new_cq(eq,
					    hw->num_qentries[SLI4_QTYPE_CQ]);
			if (!cq) {
				efct_hw_queue_teardown(hw);
				return -ENOMEM;
			}

			mq = efct_hw_new_mq(cq, EFCT_HW_MQ_DEPTH);
			if (!mq) {
				efct_hw_queue_teardown(hw);
				return -ENOMEM;
			}
		}

		/* Create WQ */
		cq = efct_hw_new_cq(eq, hw->num_qentries[SLI4_QTYPE_CQ]);
		if (!cq) {
			efct_hw_queue_teardown(hw);
			return -ENOMEM;
		}

		wq = efct_hw_new_wq(cq, hw->num_qentries[SLI4_QTYPE_WQ]);
		if (!wq) {
			efct_hw_queue_teardown(hw);
			return -ENOMEM;
		}
	}

	/* Create CQ set */
	if (efct_hw_new_cq_set(eqs, cqs, i, hw->num_qentries[SLI4_QTYPE_CQ])) {
		efct_hw_queue_teardown(hw);
		return -EIO;
	}

	/* Create RQ set */
	if (efct_hw_new_rq_set(cqs, rqs, i, EFCT_HW_RQ_ENTRIES_DEF)) {
		efct_hw_queue_teardown(hw);
		return -EIO;
	}

	for (j = 0; j < i; j++) {
		rqs[j]->filter_mask = 0;
		rqs[j]->is_mrq = true;
		rqs[j]->base_mrq_id = rqs[0]->hdr->id;
	}

	hw->hw_mrq_count = i;

	return 0;
}

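/*
 * Build hw->wq_cpu_array: for each EQ/interrupt vector, point every CPU in
 * that vector's IRQ affinity mask at the vector's WQ, giving a per-CPU WQ
 * lookup table.
 */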
int
efct_hw_map_wq_cpu(struct efct_hw *hw)
{
	struct efct *efct = hw->os;
	u32 cpu = 0, i;

	/* Init wq_cpu_array, one WQ pointer per possible CPU */
	hw->wq_cpu_array = kcalloc(num_possible_cpus(), sizeof(void *),
				   GFP_KERNEL);
	if (!hw->wq_cpu_array)
		return -ENOMEM;

	for (i = 0; i < hw->config.n_eq; i++) {
		const struct cpumask *maskp;

		/* Get a CPU mask for all CPUs affinitized to this vector */
		maskp = pci_irq_get_affinity(efct->pci, i);
		if (!maskp) {
			efc_log_debug(efct, "maskp null for vector:%d\n", i);
			continue;
		}

		/* Loop through all CPUs associated with vector idx */
		for_each_cpu_and(cpu, maskp, cpu_present_mask) {
			efc_log_debug(efct, "CPU:%d irq vector:%d\n", cpu, i);
			hw->wq_cpu_array[cpu] = hw->hw_wq[i];
		}
	}

	return 0;
}

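/*
 * Allocate one event queue, register it with the SLI layer (the EQ delay
 * is programmed via sli_eq_modify_delay()), and add it to hw->eq_list.
 * Returns NULL on failure.
 */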
struct hw_eq *
efct_hw_new_eq(struct efct_hw *hw, u32 entry_count)
{
	struct hw_eq *eq = kzalloc(sizeof(*eq), GFP_KERNEL);

	if (!eq)
		return NULL;

	eq->type = SLI4_QTYPE_EQ;
	eq->hw = hw;
	eq->entry_count = entry_count;
	eq->instance = hw->eq_count++;
	eq->queue = &hw->eq[eq->instance];
	INIT_LIST_HEAD(&eq->cq_list);

	if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_EQ, eq->queue, entry_count,
			    NULL)) {
		efc_log_err(hw->os, "EQ[%d] alloc failure\n", eq->instance);
		kfree(eq);
		return NULL;
	}

	sli_eq_modify_delay(&hw->sli, eq->queue, 1, 0, 8);
	hw->hw_eq[eq->instance] = eq;
	INIT_LIST_HEAD(&eq->list_entry);
	list_add_tail(&eq->list_entry, &hw->eq_list);
	efc_log_debug(hw->os, "create eq[%2d] id %3d len %4d\n", eq->instance,
		      eq->queue->id, eq->entry_count);
	return eq;
}

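/*
 * Allocate one completion queue bound to @eq and link it onto the EQ's
 * cq_list. Returns NULL on failure.
 */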
struct hw_cq *
efct_hw_new_cq(struct hw_eq *eq, u32 entry_count)
{
	struct efct_hw *hw = eq->hw;
	struct hw_cq *cq = kzalloc(sizeof(*cq), GFP_KERNEL);

	if (!cq)
		return NULL;

	cq->eq = eq;
	cq->type = SLI4_QTYPE_CQ;
	cq->instance = eq->hw->cq_count++;
	cq->entry_count = entry_count;
	cq->queue = &hw->cq[cq->instance];

	INIT_LIST_HEAD(&cq->q_list);

	if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_CQ, cq->queue,
			    cq->entry_count, eq->queue)) {
		efc_log_err(hw->os, "CQ[%d] allocation failure len=%d\n",
			    cq->instance, cq->entry_count);
		kfree(cq);
		return NULL;
	}

	hw->hw_cq[cq->instance] = cq;
	INIT_LIST_HEAD(&cq->list_entry);
	list_add_tail(&cq->list_entry, &eq->cq_list);
	efc_log_debug(hw->os, "create cq[%2d] id %3d len %4d\n", cq->instance,
		      cq->queue->id, cq->entry_count);
	return cq;
}

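/*
 * Allocate @num_cqs completion queues with a single SLI4 CQ-set command, one
 * CQ per EQ; these back the RQ pairs created by efct_hw_new_rq_set(). On
 * error every CQ object is freed and -EIO is returned.
 */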
u32
efct_hw_new_cq_set(struct hw_eq *eqs[], struct hw_cq *cqs[],
		   u32 num_cqs, u32 entry_count)
{
	u32 i;
	struct efct_hw *hw = eqs[0]->hw;
	struct sli4 *sli4 = &hw->sli;
	struct hw_cq *cq = NULL;
	struct sli4_queue *qs[SLI4_MAX_CQ_SET_COUNT];
	struct sli4_queue *assocs[SLI4_MAX_CQ_SET_COUNT];

	/* Initialise CQS pointers to NULL */
	for (i = 0; i < num_cqs; i++)
		cqs[i] = NULL;

	for (i = 0; i < num_cqs; i++) {
		cq = kzalloc(sizeof(*cq), GFP_KERNEL);
		if (!cq)
			goto error;

		cqs[i]          = cq;
		cq->eq          = eqs[i];
		cq->type        = SLI4_QTYPE_CQ;
		cq->instance    = hw->cq_count++;
		cq->entry_count = entry_count;
		cq->queue       = &hw->cq[cq->instance];
		qs[i]           = cq->queue;
		assocs[i]       = eqs[i]->queue;
		INIT_LIST_HEAD(&cq->q_list);
	}

	if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assocs)) {
		efc_log_err(hw->os, "Failed to create CQ Set.\n");
		goto error;
	}

	for (i = 0; i < num_cqs; i++) {
		hw->hw_cq[cqs[i]->instance] = cqs[i];
		INIT_LIST_HEAD(&cqs[i]->list_entry);
		list_add_tail(&cqs[i]->list_entry, &cqs[i]->eq->cq_list);
	}

	return 0;

error:
	for (i = 0; i < num_cqs; i++) {
		kfree(cqs[i]);
		cqs[i] = NULL;
	}
	return -EIO;
}

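/*
 * Allocate the mailbox queue on @cq; a single MQ is created on the first
 * EQ's CQ by efct_hw_init_queues().
 */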
struct hw_mq *
efct_hw_new_mq(struct hw_cq *cq, u32 entry_count)
{
	struct efct_hw *hw = cq->eq->hw;
	struct hw_mq *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

	if (!mq)
		return NULL;

	mq->cq = cq;
	mq->type = SLI4_QTYPE_MQ;
	mq->instance = cq->eq->hw->mq_count++;
	mq->entry_count = entry_count;
	mq->entry_size = EFCT_HW_MQ_DEPTH;
	mq->queue = &hw->mq[mq->instance];

	if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_MQ, mq->queue, mq->entry_size,
			    cq->queue)) {
		efc_log_err(hw->os, "MQ allocation failure\n");
		kfree(mq);
		return NULL;
	}

	hw->hw_mq[mq->instance] = mq;
	INIT_LIST_HEAD(&mq->list_entry);
	list_add_tail(&mq->list_entry, &cq->q_list);
	efc_log_debug(hw->os, "create mq[%2d] id %3d len %4d\n", mq->instance,
		      mq->queue->id, mq->entry_count);
	return mq;
}

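/*
 * Allocate one work queue on @cq. One entry is held in reserve, so
 * free_count starts at entry_count - 1.
 */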
struct hw_wq *
efct_hw_new_wq(struct hw_cq *cq, u32 entry_count)
{
	struct efct_hw *hw = cq->eq->hw;
	struct hw_wq *wq = kzalloc(sizeof(*wq), GFP_KERNEL);

	if (!wq)
		return NULL;

	wq->hw = cq->eq->hw;
	wq->cq = cq;
	wq->type = SLI4_QTYPE_WQ;
	wq->instance = cq->eq->hw->wq_count++;
	wq->entry_count = entry_count;
	wq->queue = &hw->wq[wq->instance];
	wq->wqec_set_count = EFCT_HW_WQEC_SET_COUNT;
	wq->wqec_count = wq->wqec_set_count;
	wq->free_count = wq->entry_count - 1;
	INIT_LIST_HEAD(&wq->pending_list);

	if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_WQ, wq->queue,
			    wq->entry_count, cq->queue)) {
		efc_log_err(hw->os, "WQ allocation failure\n");
		kfree(wq);
		return NULL;
	}

	hw->hw_wq[wq->instance] = wq;
	INIT_LIST_HEAD(&wq->list_entry);
	list_add_tail(&wq->list_entry, &cq->q_list);
	efc_log_debug(hw->os, "create wq[%2d] id %3d len %4d cls %d\n",
		      wq->instance, wq->queue->id, wq->entry_count, wq->class);
	return wq;
}

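/*
 * Allocate @num_rq_pairs receive queue pairs as one SLI4 RQ set. Each hw_rq
 * wraps two SLI queues, a header RQ and a data RQ, posted together in RQ
 * pair mode. Every pair also gets an rq_tracker array, indexed by ring
 * slot, recording which efc_hw_sequence buffer is posted where.
 */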
u32
efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[],
		   u32 num_rq_pairs, u32 entry_count)
{
	struct efct_hw *hw = cqs[0]->eq->hw;
	struct hw_rq *rq = NULL;
	struct sli4_queue *qs[SLI4_MAX_RQ_SET_COUNT * 2] = { NULL };
	u32 i, q_count, size;

	/* Initialise RQS pointers */
	for (i = 0; i < num_rq_pairs; i++)
		rqs[i] = NULL;

	/*
	 * Allocate an RQ object SET, where each element in the set
	 * encapsulates 2 SLI queues (for an rq pair)
	 */
	for (i = 0, q_count = 0; i < num_rq_pairs; i++, q_count += 2) {
		rq = kzalloc(sizeof(*rq), GFP_KERNEL);
		if (!rq)
			goto error;

		rqs[i] = rq;
		rq->instance = hw->hw_rq_count++;
		rq->cq = cqs[i];
		rq->type = SLI4_QTYPE_RQ;
		rq->entry_count = entry_count;

		/* Header RQ */
		rq->hdr = &hw->rq[hw->rq_count];
		rq->hdr_entry_size = EFCT_HW_RQ_HEADER_SIZE;
		hw->hw_rq_lookup[hw->rq_count] = rq->instance;
		hw->rq_count++;
		qs[q_count] = rq->hdr;

		/* Data RQ */
		rq->data = &hw->rq[hw->rq_count];
		rq->data_entry_size = hw->config.rq_default_buffer_size;
		hw->hw_rq_lookup[hw->rq_count] = rq->instance;
		hw->rq_count++;
		qs[q_count + 1] = rq->data;

		rq->rq_tracker = NULL;
	}

	if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs,
				cqs[0]->queue->id,
				rqs[0]->entry_count,
				rqs[0]->hdr_entry_size,
				rqs[0]->data_entry_size)) {
		efc_log_err(hw->os, "RQ Set alloc failure for base CQ=%d\n",
			    cqs[0]->queue->id);
		goto error;
	}

	for (i = 0; i < num_rq_pairs; i++) {
		hw->hw_rq[rqs[i]->instance] = rqs[i];
		INIT_LIST_HEAD(&rqs[i]->list_entry);
		list_add_tail(&rqs[i]->list_entry, &cqs[i]->q_list);
		size = sizeof(struct efc_hw_sequence *) * rqs[i]->entry_count;
		rqs[i]->rq_tracker = kzalloc(size, GFP_KERNEL);
		if (!rqs[i]->rq_tracker)
			goto error;
	}

	return 0;

error:
	for (i = 0; i < num_rq_pairs; i++) {
		if (rqs[i]) {
			kfree(rqs[i]->rq_tracker);
			kfree(rqs[i]);
		}
	}

	return -EIO;
}

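/* Delete an EQ along with every CQ (and MQ/WQ/RQ) chained under it. */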
void
efct_hw_del_eq(struct hw_eq *eq)
{
	struct hw_cq *cq;
	struct hw_cq *cq_next;

	if (!eq)
		return;

	list_for_each_entry_safe(cq, cq_next, &eq->cq_list, list_entry)
		efct_hw_del_cq(cq);
	list_del(&eq->list_entry);
	eq->hw->hw_eq[eq->instance] = NULL;
	kfree(eq);
}

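/* Delete a CQ, dispatching each attached queue to its type's delete helper. */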
void
efct_hw_del_cq(struct hw_cq *cq)
{
	struct hw_q *q;
	struct hw_q *q_next;

	if (!cq)
		return;

	list_for_each_entry_safe(q, q_next, &cq->q_list, list_entry) {
		switch (q->type) {
		case SLI4_QTYPE_MQ:
			efct_hw_del_mq((struct hw_mq *)q);
			break;
		case SLI4_QTYPE_WQ:
			efct_hw_del_wq((struct hw_wq *)q);
			break;
		case SLI4_QTYPE_RQ:
			efct_hw_del_rq((struct hw_rq *)q);
			break;
		default:
			break;
		}
	}
	list_del(&cq->list_entry);
	cq->eq->hw->hw_cq[cq->instance] = NULL;
	kfree(cq);
}

void
efct_hw_del_mq(struct hw_mq *mq)
{
	if (!mq)
		return;

	list_del(&mq->list_entry);
	mq->cq->eq->hw->hw_mq[mq->instance] = NULL;
	kfree(mq);
}

void
efct_hw_del_wq(struct hw_wq *wq)
{
	if (!wq)
		return;

	list_del(&wq->list_entry);
	wq->cq->eq->hw->hw_wq[wq->instance] = NULL;
	kfree(wq);
}

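/* Delete an RQ pair object, including its posted-buffer tracker array. */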
void
efct_hw_del_rq(struct hw_rq *rq)
{
	struct efct_hw *hw = NULL;

	if (!rq)
		return;
	/* Free RQ tracker */
	kfree(rq->rq_tracker);
	rq->rq_tracker = NULL;
	list_del(&rq->list_entry);
	hw = rq->cq->eq->hw;
	hw->hw_rq[rq->instance] = NULL;
	kfree(rq);
}

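/*
 * Tear down the whole queue topology by deleting every EQ on hw->eq_list.
 * Safe to call on a partially initialized hw: bails out if the list head
 * was never set up.
 */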
void
efct_hw_queue_teardown(struct efct_hw *hw)
{
	struct hw_eq *eq;
	struct hw_eq *eq_next;

	if (!hw->eq_list.next)
		return;

	list_for_each_entry_safe(eq, eq_next, &hw->eq_list, list_entry)
		efct_hw_del_eq(eq);
}

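/* Translate an SLI4 RQ id into the corresponding index into hw->rq[]. */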
static inline int
efct_hw_rqpair_find(struct efct_hw *hw, u16 rq_id)
{
	return efct_hw_queue_hash_find(hw->rq_hash, rq_id);
}

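/*
 * Detach and return the sequence posted at slot @bufindex of the RQ pair
 * that owns @rqindex. Returns NULL, with an error logged, if the index is
 * out of range or the slot is unexpectedly empty.
 */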
static struct efc_hw_sequence *
efct_hw_rqpair_get(struct efct_hw *hw, u16 rqindex, u16 bufindex)
{
	struct sli4_queue *rq_hdr = &hw->rq[rqindex];
	struct efc_hw_sequence *seq = NULL;
	struct hw_rq *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
	unsigned long flags = 0;

	if (bufindex >= rq_hdr->length) {
		efc_log_err(hw->os,
			    "RQidx %d bufidx %d exceed ring len %d for id %d\n",
			    rqindex, bufindex, rq_hdr->length, rq_hdr->id);
		return NULL;
	}

	/* rq_hdr lock also covers the rqindex+1 (data) queue */
	spin_lock_irqsave(&rq_hdr->lock, flags);

	seq = rq->rq_tracker[bufindex];
	rq->rq_tracker[bufindex] = NULL;

	if (!seq) {
		efc_log_err(hw->os,
			    "RQbuf NULL, rqidx %d, bufidx %d, cur q idx = %d\n",
			    rqindex, bufindex, rq_hdr->index);
	}

	spin_unlock_irqrestore(&rq_hdr->lock, flags);
	return seq;
}

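/*
 * Handle an RQ completion entry. On an error status, the buffer is recycled
 * back to the chip where possible; on success, the sequence is filled in
 * (lengths, FCFI, owning EQ) and handed to the unsolicited frame handler.
 */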
int
efct_hw_rqpair_process_rq(struct efct_hw *hw, struct hw_cq *cq,
			  u8 *cqe)
{
	u16 rq_id;
	u32 index;
	int rqindex;
	int rq_status;
	u32 h_len;
	u32 p_len;
	struct efc_hw_sequence *seq;
	struct hw_rq *rq;

	rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe,
					      &rq_id, &index);
	if (rq_status != 0) {
		switch (rq_status) {
		case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
		case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
			/* just get the RQ buffer, then return it to the chip */
			rqindex = efct_hw_rqpair_find(hw, rq_id);
			if (rqindex < 0) {
				efc_log_debug(hw->os,
					      "status=%#x: lookup fail id=%#x\n",
					      rq_status, rq_id);
				break;
			}

			/* get RQ buffer */
			seq = efct_hw_rqpair_get(hw, rqindex, index);

			/* return to chip */
			if (efct_hw_rqpair_sequence_free(hw, seq)) {
				efc_log_debug(hw->os,
					      "status=%#x, fail to return buf to RQ\n",
					      rq_status);
				break;
			}
			break;
		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
			/*
			 * since RQ buffers were not consumed, cannot return
			 * them to the chip
			 */
			efc_log_debug(hw->os, "Warning: RCQE status=%#x\n",
				      rq_status);
			fallthrough;
		default:
			break;
		}
		return -EIO;
	}

	rqindex = efct_hw_rqpair_find(hw, rq_id);
	if (rqindex < 0) {
		efc_log_debug(hw->os, "Error: rq_id lookup failed for id=%#x\n",
			      rq_id);
		return -EIO;
	}

	rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
	rq->use_count++;

	seq = efct_hw_rqpair_get(hw, rqindex, index);
	if (WARN_ON(!seq))
		return -EIO;

	seq->hw = hw;

	sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
	seq->header->dma.len = h_len;
	seq->payload->dma.len = p_len;
	seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
	seq->hw_priv = cq->eq;

	efct_unsolicited_cb(hw->os, seq);

	return 0;
}

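/*
 * Re-post a header/payload buffer pair to the chip and record the sequence
 * in rq_tracker at the returned ring index. The payload entry is written
 * first because, in RQ pair mode, writing the header entry rings the
 * doorbell for both queues.
 */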
static int
efct_hw_rqpair_put(struct efct_hw *hw, struct efc_hw_sequence *seq)
{
	struct sli4_queue *rq_hdr = &hw->rq[seq->header->rqindex];
	struct sli4_queue *rq_payload = &hw->rq[seq->payload->rqindex];
	u32 hw_rq_index = hw->hw_rq_lookup[seq->header->rqindex];
	struct hw_rq *rq = hw->hw_rq[hw_rq_index];
	u32 phys_hdr[2];
	u32 phys_payload[2];
	int qindex_hdr;
	int qindex_payload;
	unsigned long flags = 0;

	/* Update the RQ verification lookup tables */
	phys_hdr[0] = upper_32_bits(seq->header->dma.phys);
	phys_hdr[1] = lower_32_bits(seq->header->dma.phys);
	phys_payload[0] = upper_32_bits(seq->payload->dma.phys);
	phys_payload[1] = lower_32_bits(seq->payload->dma.phys);

	/* rq_hdr lock also covers the payload / header->rqindex+1 queue */
	spin_lock_irqsave(&rq_hdr->lock, flags);

	/*
	 * Note: The header must be posted last for buffer pair mode because
	 *       posting on the header queue posts the payload queue as well.
	 *       We do not ring the payload queue independently in RQ pair mode.
	 */
	qindex_payload = sli_rq_write(&hw->sli, rq_payload,
				      (void *)phys_payload);
	qindex_hdr = sli_rq_write(&hw->sli, rq_hdr, (void *)phys_hdr);
	if (qindex_hdr < 0 || qindex_payload < 0) {
		efc_log_err(hw->os, "RQ_ID=%#x write failed\n", rq_hdr->id);
		spin_unlock_irqrestore(&rq_hdr->lock, flags);
		return -EIO;
	}

	/* ensure the indexes are the same */
	WARN_ON(qindex_hdr != qindex_payload);

	/* Update the lookup table */
	if (!rq->rq_tracker[qindex_hdr]) {
		rq->rq_tracker[qindex_hdr] = seq;
	} else {
		efc_log_debug(hw->os,
			      "expected rq_tracker[%d][%d] buffer to be NULL\n",
			      hw_rq_index, qindex_hdr);
	}

	spin_unlock_irqrestore(&rq_hdr->lock, flags);
	return 0;
}

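/*
 * Return a consumed sequence's header and payload buffers to the RQ pair so
 * the chip can reuse them.
 */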
int
efct_hw_rqpair_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq)
{
	int rc = 0;

	/*
	 * Post the data buffer first. Because in RQ pair mode, ringing the
	 * doorbell of the header ring will post the data buffer as well.
	 */
	if (efct_hw_rqpair_put(hw, seq)) {
		efc_log_err(hw->os, "error writing buffers\n");
		return -EIO;
	}

	return rc;
}

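/* libefc callback: thin wrapper around efct_hw_rqpair_sequence_free(). */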
int
efct_efc_hw_sequence_free(struct efc *efc, struct efc_hw_sequence *seq)
{
	struct efct *efct = efc->base;

	return efct_hw_rqpair_sequence_free(&efct->hw, seq);
}