1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2007-2017 QLogic Corporation. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 #include "bxe.h"
31 #include "ecore_init.h"
32
33
34
35
36 /**** Exe Queue interfaces ****/
37
/**
 * ecore_exe_queue_init - init the Exe Queue object
 *
 * @sc: driver handle
 * @o: pointer to the object
 * @exe_len: length
 * @owner: pointer to the owner
 * @validate: validate function pointer
 * @remove: remove function pointer
 * @optimize: optimize function pointer
 * @exec: execute function pointer
 * @get: get function pointer
 */
static inline void ecore_exe_queue_init(struct bxe_softc *sc,
                                        struct ecore_exe_queue_obj *o,
                                        int exe_len,
                                        union ecore_qable_obj *owner,
                                        exe_q_validate validate,
                                        exe_q_remove remove,
                                        exe_q_optimize optimize,
                                        exe_q_execute exec,
                                        exe_q_get get)
58 {
59 ECORE_MEMSET(o, 0, sizeof(*o));
60
61 ECORE_LIST_INIT(&o->exe_queue);
62 ECORE_LIST_INIT(&o->pending_comp);
63
64 ECORE_SPIN_LOCK_INIT(&o->lock, sc);
65
66 o->exe_chunk_len = exe_len;
67 o->owner = owner;
68
69 /* Owner specific callbacks */
70 o->validate = validate;
71 o->remove = remove;
72 o->optimize = optimize;
73 o->execute = exec;
74 o->get = get;
75
76 ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d\n",
77 exe_len);
78 }
79
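/**
 * ecore_exe_queue_free_elem - free a single execution queue element
 *
 * @sc: driver handle
 * @elem: element to free
 */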
static inline void ecore_exe_queue_free_elem(struct bxe_softc *sc,
                                             struct ecore_exeq_elem *elem)
82 {
83 ECORE_MSG(sc, "Deleting an exe_queue element\n");
84 ECORE_FREE(sc, elem, sizeof(*elem));
85 }
86
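/**
 * ecore_exe_queue_length - count the elements currently on the exe_queue
 *
 * @o: execution queue object
 *
 * Takes and releases the queue lock while walking the list.
 */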
static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
88 {
89 struct ecore_exeq_elem *elem;
90 int cnt = 0;
91
92 ECORE_SPIN_LOCK_BH(&o->lock);
93
94 ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
95 struct ecore_exeq_elem)
96 cnt++;
97
98 ECORE_SPIN_UNLOCK_BH(&o->lock);
99
100 return cnt;
101 }
102
103 /**
104 * ecore_exe_queue_add - add a new element to the execution queue
105 *
106 * @sc: driver handle
107 * @o: queue
108 * @cmd: new command to add
109 * @restore: true - do not optimize the command
110 *
111 * If the element is optimized or is illegal, frees it.
112 */
static inline int ecore_exe_queue_add(struct bxe_softc *sc,
                                      struct ecore_exe_queue_obj *o,
                                      struct ecore_exeq_elem *elem,
                                      bool restore)
117 {
118 int rc;
119
120 ECORE_SPIN_LOCK_BH(&o->lock);
121
122 if (!restore) {
123 /* Try to cancel this element queue */
124 rc = o->optimize(sc, o->owner, elem);
125 if (rc)
126 goto free_and_exit;
127
128 /* Check if this request is ok */
129 rc = o->validate(sc, o->owner, elem);
130 if (rc) {
131 ECORE_MSG(sc, "Preamble failed: %d\n", rc);
132 goto free_and_exit;
133 }
134 }
135
136 /* If so, add it to the execution queue */
137 ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);
138
139 ECORE_SPIN_UNLOCK_BH(&o->lock);
140
141 return ECORE_SUCCESS;
142
143 free_and_exit:
144 ecore_exe_queue_free_elem(sc, elem);
145
146 ECORE_SPIN_UNLOCK_BH(&o->lock);
147
148 return rc;
149 }
150
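/**
 * __ecore_exe_queue_reset_pending - free all elements on the pending_comp list
 *
 * @sc: driver handle
 * @o: execution queue object
 *
 * Should be called while holding the exe_queue->lock.
 */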
static inline void __ecore_exe_queue_reset_pending(
	struct bxe_softc *sc,
	struct ecore_exe_queue_obj *o)
154 {
155 struct ecore_exeq_elem *elem;
156
157 while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
158 elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
159 struct ecore_exeq_elem,
160 link);
161
162 ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
163 ecore_exe_queue_free_elem(sc, elem);
164 }
165 }
166
167 /**
168 * ecore_exe_queue_step - execute one execution chunk atomically
169 *
170 * @sc: driver handle
171 * @o: queue
172 * @ramrod_flags: flags
173 *
174 * (Should be called while holding the exe_queue->lock).
175 */
static inline int ecore_exe_queue_step(struct bxe_softc *sc,
                                       struct ecore_exe_queue_obj *o,
                                       unsigned long *ramrod_flags)
179 {
180 struct ecore_exeq_elem *elem, spacer;
181 int cur_len = 0, rc;
182
183 ECORE_MEMSET(&spacer, 0, sizeof(spacer));
184
185 /* Next step should not be performed until the current is finished,
186 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
187 * properly clear object internals without sending any command to the FW
188 * which also implies there won't be any completion to clear the
189 * 'pending' list.
190 */
191 if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
192 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
193 ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
194 __ecore_exe_queue_reset_pending(sc, o);
195 } else {
196 return ECORE_PENDING;
197 }
198 }
199
200 /* Run through the pending commands list and create a next
201 * execution chunk.
202 */
203 while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
204 elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
205 struct ecore_exeq_elem,
206 link);
207 ECORE_DBG_BREAK_IF(!elem->cmd_len);
208
209 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
210 cur_len += elem->cmd_len;
211 /* Prevent from both lists being empty when moving an
212 * element. This will allow the call of
213 * ecore_exe_queue_empty() without locking.
214 */
215 ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
216 mb();
217 ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
218 ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
219 ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
220 } else
221 break;
222 }
223
224 /* Sanity check */
225 if (!cur_len)
226 return ECORE_SUCCESS;
227
228 rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
229 if (rc < 0)
230 /* In case of an error return the commands back to the queue
231 * and reset the pending_comp.
232 */
233 ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
234 else if (!rc)
235 /* If zero is returned, means there are no outstanding pending
236 * completions and we may dismiss the pending list.
237 */
238 __ecore_exe_queue_reset_pending(sc, o);
239
240 return rc;
241 }
242
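/**
 * ecore_exe_queue_empty - check whether there is nothing to execute or complete
 *
 * @o: execution queue object
 *
 * Returns TRUE only if both the exe_queue and pending_comp lists are empty.
 */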
static inline bool ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
244 {
245 bool empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);
246
247 /* Don't reorder!!! */
248 mb();
249
250 return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
251 }
252
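/**
 * ecore_exe_queue_alloc_elem - allocate a new, zeroed exe_queue element
 *
 * @sc: driver handle
 */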
static inline struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(
	struct bxe_softc *sc)
255 {
256 ECORE_MSG(sc, "Allocating a new exe_queue element\n");
257 return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC,
258 sc);
259 }
260
261 /************************ raw_obj functions ***********************************/
static bool ecore_raw_check_pending(struct ecore_raw_obj *o)
263 {
264 /*
265 * !! converts the value returned by ECORE_TEST_BIT such that it
266 * is guaranteed not to be truncated regardless of bool definition.
267 *
268 * Note we cannot simply define the function's return value type
269 * to match the type returned by ECORE_TEST_BIT, as it varies by
270 * platform/implementation.
271 */
272
273 return !!ECORE_TEST_BIT(o->state, o->pstate);
274 }
275
static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
277 {
278 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
279 ECORE_CLEAR_BIT(o->state, o->pstate);
280 ECORE_SMP_MB_AFTER_CLEAR_BIT();
281 }
282
static void ecore_raw_set_pending(struct ecore_raw_obj *o)
284 {
285 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
286 ECORE_SET_BIT(o->state, o->pstate);
287 ECORE_SMP_MB_AFTER_CLEAR_BIT();
288 }
289
290 /**
291 * ecore_state_wait - wait until the given bit(state) is cleared
292 *
293 * @sc: device handle
294 * @state: state which is to be cleared
295 * @state_p: state buffer
296 *
297 */
static inline int ecore_state_wait(struct bxe_softc *sc, int state,
                                   unsigned long *pstate)
300 {
301 /* can take a while if any port is running */
302 int cnt = 5000;
303
304
305 if (CHIP_REV_IS_EMUL(sc))
306 cnt *= 20;
307
308 ECORE_MSG(sc, "waiting for state to become %d\n", state);
309
310 ECORE_MIGHT_SLEEP();
311 while (cnt--) {
312 if (!ECORE_TEST_BIT(state, pstate)) {
313 #ifdef ECORE_STOP_ON_ERROR
314 ECORE_MSG(sc, "exit (cnt %d)\n", 5000 - cnt);
315 #endif
316 return ECORE_SUCCESS;
317 }
318
319 ECORE_WAIT(sc, delay_us);
320
321 if (sc->panic)
322 return ECORE_IO;
323 }
324
325 /* timeout! */
326 ECORE_ERR("timeout waiting for state %d\n", state);
327 #ifdef ECORE_STOP_ON_ERROR
328 ecore_panic();
329 #endif
330
331 return ECORE_TIMEOUT;
332 }
333
static int ecore_raw_wait(struct bxe_softc *sc, struct ecore_raw_obj *raw)
335 {
336 return ecore_state_wait(sc, raw->state, raw->pstate);
337 }
338
339 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
340 /* credit handling callbacks */
static bool ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
342 {
343 struct ecore_credit_pool_obj *mp = o->macs_pool;
344
345 ECORE_DBG_BREAK_IF(!mp);
346
347 return mp->get_entry(mp, offset);
348 }
349
static bool ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
351 {
352 struct ecore_credit_pool_obj *mp = o->macs_pool;
353
354 ECORE_DBG_BREAK_IF(!mp);
355
356 return mp->get(mp, 1);
357 }
358
static bool ecore_get_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int *offset)
360 {
361 struct ecore_credit_pool_obj *vp = o->vlans_pool;
362
363 ECORE_DBG_BREAK_IF(!vp);
364
365 return vp->get_entry(vp, offset);
366 }
367
static bool ecore_get_credit_vlan(struct ecore_vlan_mac_obj *o)
369 {
370 struct ecore_credit_pool_obj *vp = o->vlans_pool;
371
372 ECORE_DBG_BREAK_IF(!vp);
373
374 return vp->get(vp, 1);
375 }
376
static bool ecore_get_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
378 {
379 struct ecore_credit_pool_obj *mp = o->macs_pool;
380 struct ecore_credit_pool_obj *vp = o->vlans_pool;
381
382 if (!mp->get(mp, 1))
383 return FALSE;
384
385 if (!vp->get(vp, 1)) {
386 mp->put(mp, 1);
387 return FALSE;
388 }
389
390 return TRUE;
391 }
392
static bool ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
394 {
395 struct ecore_credit_pool_obj *mp = o->macs_pool;
396
397 return mp->put_entry(mp, offset);
398 }
399
static bool ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
401 {
402 struct ecore_credit_pool_obj *mp = o->macs_pool;
403
404 return mp->put(mp, 1);
405 }
406
static bool ecore_put_cam_offset_vlan(struct ecore_vlan_mac_obj *o, int offset)
408 {
409 struct ecore_credit_pool_obj *vp = o->vlans_pool;
410
411 return vp->put_entry(vp, offset);
412 }
413
static bool ecore_put_credit_vlan(struct ecore_vlan_mac_obj *o)
415 {
416 struct ecore_credit_pool_obj *vp = o->vlans_pool;
417
418 return vp->put(vp, 1);
419 }
420
static bool ecore_put_credit_vlan_mac(struct ecore_vlan_mac_obj *o)
422 {
423 struct ecore_credit_pool_obj *mp = o->macs_pool;
424 struct ecore_credit_pool_obj *vp = o->vlans_pool;
425
426 if (!mp->put(mp, 1))
427 return FALSE;
428
429 if (!vp->put(vp, 1)) {
430 mp->get(mp, 1);
431 return FALSE;
432 }
433
434 return TRUE;
435 }
436
437 /**
438 * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac
439 * head list.
440 *
441 * @sc: device handle
442 * @o: vlan_mac object
443 *
444 * @details: Non-blocking implementation; should be called under execution
445 * queue lock.
446 */
static int __ecore_vlan_mac_h_write_trylock(struct bxe_softc *sc,
                                            struct ecore_vlan_mac_obj *o)
449 {
450 if (o->head_reader) {
451 ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy\n");
452 return ECORE_BUSY;
453 }
454
455 ECORE_MSG(sc, "vlan_mac_lock writer - Taken\n");
456 return ECORE_SUCCESS;
457 }
458
459 /**
460 * __ecore_vlan_mac_h_exec_pending - execute step instead of a previous step
461 * which wasn't able to run due to a taken lock on vlan mac head list.
462 *
463 * @sc: device handle
464 * @o: vlan_mac object
465 *
466 * @details Should be called under execution queue lock; notice it might release
467 * and reclaim it during its run.
468 */
static void __ecore_vlan_mac_h_exec_pending(struct bxe_softc *sc,
                                            struct ecore_vlan_mac_obj *o)
471 {
472 int rc;
473 unsigned long ramrod_flags = o->saved_ramrod_flags;
474
475 ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
476 ramrod_flags);
477 o->head_exe_request = FALSE;
478 o->saved_ramrod_flags = 0;
479 rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
480 if ((rc != ECORE_SUCCESS) && (rc != ECORE_PENDING)) {
481 ECORE_ERR("execution of pending commands failed with rc %d\n",
482 rc);
483 #ifdef ECORE_STOP_ON_ERROR
484 ecore_panic();
485 #endif
486 }
487 }
488
489 /**
490 * __ecore_vlan_mac_h_pend - Pend an execution step which couldn't have been
491 * called due to vlan mac head list lock being taken.
492 *
493 * @sc: device handle
494 * @o: vlan_mac object
495 * @ramrod_flags: ramrod flags of missed execution
496 *
497 * @details Should be called under execution queue lock.
498 */
static void __ecore_vlan_mac_h_pend(struct bxe_softc *sc,
                                    struct ecore_vlan_mac_obj *o,
                                    unsigned long ramrod_flags)
502 {
503 o->head_exe_request = TRUE;
504 o->saved_ramrod_flags = ramrod_flags;
505 ECORE_MSG(sc, "Placing pending execution with ramrod flags %lu\n",
506 ramrod_flags);
507 }
508
509 /**
510 * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
511 *
512 * @sc: device handle
513 * @o: vlan_mac object
514 *
515 * @details Should be called under execution queue lock. Notice if a pending
516 * execution exists, it would perform it - possibly releasing and
517 * reclaiming the execution queue lock.
518 */
static void __ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc,
                                            struct ecore_vlan_mac_obj *o)
521 {
522 /* It's possible a new pending execution was added since this writer
523 * executed. If so, execute again. [Ad infinitum]
524 */
525 while(o->head_exe_request) {
526 ECORE_MSG(sc, "vlan_mac_lock - writer release encountered a pending request\n");
527 __ecore_vlan_mac_h_exec_pending(sc, o);
528 }
529 }
530
531 /**
532 * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
533 *
534 * @sc: device handle
535 * @o: vlan_mac object
536 *
537 * @details Notice if a pending execution exists, it would perform it -
538 * possibly releasing and reclaiming the execution queue lock.
539 */
void ecore_vlan_mac_h_write_unlock(struct bxe_softc *sc,
                                   struct ecore_vlan_mac_obj *o)
542 {
543 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
544 __ecore_vlan_mac_h_write_unlock(sc, o);
545 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
546 }
547
548 /**
549 * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
550 *
551 * @sc: device handle
552 * @o: vlan_mac object
553 *
554 * @details Should be called under the execution queue lock. May sleep. May
555 * release and reclaim execution queue lock during its run.
556 */
static int __ecore_vlan_mac_h_read_lock(struct bxe_softc *sc,
                                        struct ecore_vlan_mac_obj *o)
559 {
560 /* If we got here, we're holding lock --> no WRITER exists */
561 o->head_reader++;
562 ECORE_MSG(sc, "vlan_mac_lock - locked reader - number %d\n",
563 o->head_reader);
564
565 return ECORE_SUCCESS;
566 }
567
568 /**
569 * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
570 *
571 * @sc: device handle
572 * @o: vlan_mac object
573 *
574 * @details May sleep. Claims and releases execution queue lock during its run.
575 */
int ecore_vlan_mac_h_read_lock(struct bxe_softc *sc,
                               struct ecore_vlan_mac_obj *o)
578 {
579 int rc;
580
581 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
582 rc = __ecore_vlan_mac_h_read_lock(sc, o);
583 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
584
585 return rc;
586 }
587
588 /**
589 * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
590 *
591 * @sc: device handle
592 * @o: vlan_mac object
593 *
594 * @details Should be called under execution queue lock. Notice if a pending
595 * execution exists, it would be performed if this was the last
596 * reader. possibly releasing and reclaiming the execution queue lock.
597 */
static void __ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
                                           struct ecore_vlan_mac_obj *o)
600 {
601 if (!o->head_reader) {
602 ECORE_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
603 #ifdef ECORE_STOP_ON_ERROR
604 ecore_panic();
605 #endif
606 } else {
607 o->head_reader--;
608 ECORE_MSG(sc, "vlan_mac_lock - decreased readers to %d\n",
609 o->head_reader);
610 }
611
612 /* It's possible a new pending execution was added, and that this reader
613 * was last - if so we need to execute the command.
614 */
615 if (!o->head_reader && o->head_exe_request) {
616 ECORE_MSG(sc, "vlan_mac_lock - reader release encountered a pending request\n");
617
618 /* Writer release will do the trick */
619 __ecore_vlan_mac_h_write_unlock(sc, o);
620 }
621 }
622
623 /**
624 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
625 *
626 * @sc: device handle
627 * @o: vlan_mac object
628 *
629 * @details Notice if a pending execution exists, it would be performed if this
630 * was the last reader. Claims and releases the execution queue lock
631 * during its run.
632 */
void ecore_vlan_mac_h_read_unlock(struct bxe_softc *sc,
                                  struct ecore_vlan_mac_obj *o)
635 {
636 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
637 __ecore_vlan_mac_h_read_unlock(sc, o);
638 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
639 }
640
/**
 * ecore_get_n_elements - copy up to n elements from the vlan_mac registry
 *
 * @sc: device handle
 * @o: vlan_mac object
 * @n: number of elements to get
 * @base: base address for element placement
 * @stride: stride between elements (in bytes)
 * @size: size of a single element (in bytes)
 */
static int ecore_get_n_elements(struct bxe_softc *sc, struct ecore_vlan_mac_obj *o,
                                int n, uint8_t *base, uint8_t stride, uint8_t size)
652 {
653 struct ecore_vlan_mac_registry_elem *pos;
654 uint8_t *next = base;
655 int counter = 0;
656 int read_lock;
657
658 ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)\n");
659 read_lock = ecore_vlan_mac_h_read_lock(sc, o);
660 if (read_lock != ECORE_SUCCESS)
661 ECORE_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
662
663 /* traverse list */
664 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
665 struct ecore_vlan_mac_registry_elem) {
666 if (counter < n) {
667 ECORE_MEMCPY(next, &pos->u, size);
668 counter++;
669 ECORE_MSG(sc, "copied element number %d to address %p element was:\n",
670 counter, next);
671 next += stride + size;
672 }
673 }
674
675 if (read_lock == ECORE_SUCCESS) {
676 ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)\n");
677 ecore_vlan_mac_h_read_unlock(sc, o);
678 }
679
680 return counter * ETH_ALEN;
681 }
682
683 /* check_add() callbacks */
static int ecore_check_mac_add(struct bxe_softc *sc,
                               struct ecore_vlan_mac_obj *o,
                               union ecore_classification_ramrod_data *data)
687 {
688 struct ecore_vlan_mac_registry_elem *pos;
689
690 ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
691
692 if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
693 return ECORE_INVAL;
694
695 /* Check if a requested MAC already exists */
696 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
697 struct ecore_vlan_mac_registry_elem)
698 if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
699 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
700 return ECORE_EXISTS;
701
702 return ECORE_SUCCESS;
703 }
704
static int ecore_check_vlan_add(struct bxe_softc *sc,
                                struct ecore_vlan_mac_obj *o,
                                union ecore_classification_ramrod_data *data)
708 {
709 struct ecore_vlan_mac_registry_elem *pos;
710
711 ECORE_MSG(sc, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
712
713 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
714 struct ecore_vlan_mac_registry_elem)
715 if (data->vlan.vlan == pos->u.vlan.vlan)
716 return ECORE_EXISTS;
717
718 return ECORE_SUCCESS;
719 }
720
static int ecore_check_vlan_mac_add(struct bxe_softc *sc,
                                    struct ecore_vlan_mac_obj *o,
                                    union ecore_classification_ramrod_data *data)
724 {
725 struct ecore_vlan_mac_registry_elem *pos;
726
727 ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for ADD command\n",
728 data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);
729
730 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
731 struct ecore_vlan_mac_registry_elem)
732 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
733 (!ECORE_MEMCMP(data->vlan_mac.mac, pos->u.vlan_mac.mac,
734 ETH_ALEN)) &&
735 (data->vlan_mac.is_inner_mac ==
736 pos->u.vlan_mac.is_inner_mac))
737 return ECORE_EXISTS;
738
739 return ECORE_SUCCESS;
740 }
741
static int ecore_check_vxlan_fltr_add(struct bxe_softc *sc,
                                      struct ecore_vlan_mac_obj *o,
                                      union ecore_classification_ramrod_data *data)
745 {
746 struct ecore_vlan_mac_registry_elem *pos;
747
748 ECORE_MSG(sc, "Checking VXLAN_FLTR (Inner:%pM, %d) for ADD command\n",
749 data->vxlan_fltr.innermac, data->vxlan_fltr.vni);
750
751 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
752 struct ecore_vlan_mac_registry_elem)
753 if ((!ECORE_MEMCMP(data->vxlan_fltr.innermac,
754 pos->u.vxlan_fltr.innermac,
755 ETH_ALEN)) &&
756 (data->vxlan_fltr.vni == pos->u.vxlan_fltr.vni))
757 return ECORE_EXISTS;
758
759 return ECORE_SUCCESS;
760 }
761
762 /* check_del() callbacks */
763 static struct ecore_vlan_mac_registry_elem *
ecore_check_mac_del(struct bxe_softc *sc,
                    struct ecore_vlan_mac_obj *o,
                    union ecore_classification_ramrod_data *data)
767 {
768 struct ecore_vlan_mac_registry_elem *pos;
769
770 ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command\n", data->mac.mac[0], data->mac.mac[1], data->mac.mac[2], data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
771
772 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
773 struct ecore_vlan_mac_registry_elem)
774 if ((!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
775 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
776 return pos;
777
778 return NULL;
779 }
780
781 static struct ecore_vlan_mac_registry_elem *
ecore_check_vlan_del(struct bxe_softc *sc,
                     struct ecore_vlan_mac_obj *o,
                     union ecore_classification_ramrod_data *data)
785 {
786 struct ecore_vlan_mac_registry_elem *pos;
787
788 ECORE_MSG(sc, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
789
790 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
791 struct ecore_vlan_mac_registry_elem)
792 if (data->vlan.vlan == pos->u.vlan.vlan)
793 return pos;
794
795 return NULL;
796 }
797
798 static struct ecore_vlan_mac_registry_elem *
ecore_check_vlan_mac_del(struct bxe_softc *sc,
                         struct ecore_vlan_mac_obj *o,
                         union ecore_classification_ramrod_data *data)
802 {
803 struct ecore_vlan_mac_registry_elem *pos;
804
805 ECORE_MSG(sc, "Checking VLAN_MAC (%02x:%02x:%02x:%02x:%02x:%02x, %d) for DEL command\n",
806 data->vlan_mac.mac[0], data->vlan_mac.mac[1], data->vlan_mac.mac[2], data->vlan_mac.mac[3], data->vlan_mac.mac[4], data->vlan_mac.mac[5], data->vlan_mac.vlan);
807
808 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
809 struct ecore_vlan_mac_registry_elem)
810 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
811 (!ECORE_MEMCMP(data->vlan_mac.mac, pos->u.vlan_mac.mac,
812 ETH_ALEN)) &&
813 (data->vlan_mac.is_inner_mac ==
814 pos->u.vlan_mac.is_inner_mac))
815 return pos;
816
817 return NULL;
818 }
819
820 static struct ecore_vlan_mac_registry_elem *
ecore_check_vxlan_fltr_del
	(struct bxe_softc *sc,
	 struct ecore_vlan_mac_obj *o,
	 union ecore_classification_ramrod_data *data)
825 {
826 struct ecore_vlan_mac_registry_elem *pos;
827
828 ECORE_MSG(sc, "Checking VXLAN_FLTR (Inner:%pM, %d) for DEL command\n",
829 data->vxlan_fltr.innermac, data->vxlan_fltr.vni);
830
831 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
832 struct ecore_vlan_mac_registry_elem)
833 if ((!ECORE_MEMCMP(data->vxlan_fltr.innermac,
834 pos->u.vxlan_fltr.innermac,
835 ETH_ALEN)) &&
836 (data->vxlan_fltr.vni == pos->u.vxlan_fltr.vni))
837 return pos;
838
839 return NULL;
840 }
841
842 /* check_move() callback */
static bool ecore_check_move(struct bxe_softc *sc,
                             struct ecore_vlan_mac_obj *src_o,
                             struct ecore_vlan_mac_obj *dst_o,
                             union ecore_classification_ramrod_data *data)
847 {
848 struct ecore_vlan_mac_registry_elem *pos;
849 int rc;
850
851 /* Check if we can delete the requested configuration from the first
852 * object.
853 */
854 pos = src_o->check_del(sc, src_o, data);
855
856 /* check if configuration can be added */
857 rc = dst_o->check_add(sc, dst_o, data);
858
859 /* If this classification can not be added (is already set)
860 * or can't be deleted - return an error.
861 */
862 if (rc || !pos)
863 return FALSE;
864
865 return TRUE;
866 }
867
static bool ecore_check_move_always_err(
	struct bxe_softc *sc,
	struct ecore_vlan_mac_obj *src_o,
	struct ecore_vlan_mac_obj *dst_o,
	union ecore_classification_ramrod_data *data)
873 {
874 return FALSE;
875 }
876
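/**
 * ecore_vlan_mac_get_rx_tx_flag - build the RX/TX bits of a classify command
 *
 * @o: vlan_mac object
 *
 * Returns the ETH_CLASSIFY_CMD_HEADER_RX_CMD/TX_CMD bits matching the
 * object's obj_type (RX, TX or RX_TX).
 */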
static inline uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o)
878 {
879 struct ecore_raw_obj *raw = &o->raw;
880 uint8_t rx_tx_flag = 0;
881
882 if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
883 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
884 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
885
886 if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
887 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
888 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
889
890 return rx_tx_flag;
891 }
892
void ecore_set_mac_in_nig(struct bxe_softc *sc,
                          bool add, unsigned char *dev_addr, int index)
895 {
896 uint32_t wb_data[2];
897 uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM :
898 NIG_REG_LLH0_FUNC_MEM;
899
900 if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc))
901 return;
902
903 if (index > ECORE_LLH_CAM_MAX_PF_LINE)
904 return;
905
906 ECORE_MSG(sc, "Going to %s LLH configuration at entry %d\n",
907 (add ? "ADD" : "DELETE"), index);
908
909 if (add) {
910 /* LLH_FUNC_MEM is a uint64_t WB register */
911 reg_offset += 8*index;
912
913 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
914 (dev_addr[4] << 8) | dev_addr[5]);
915 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
916
917 ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2);
918 }
919
920 REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
921 NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
922 }
923
924 /**
925 * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
926 *
927 * @sc: device handle
928 * @o: queue for which we want to configure this rule
929 * @add: if TRUE the command is an ADD command, DEL otherwise
930 * @opcode: CLASSIFY_RULE_OPCODE_XXX
931 * @hdr: pointer to a header to setup
932 *
933 */
static inline void ecore_vlan_mac_set_cmd_hdr_e2(struct bxe_softc *sc,
	struct ecore_vlan_mac_obj *o, bool add, int opcode,
	struct eth_classify_cmd_header *hdr)
937 {
938 struct ecore_raw_obj *raw = &o->raw;
939
940 hdr->client_id = raw->cl_id;
941 hdr->func_id = raw->func_id;
942
943 /* Rx or/and Tx (internal switching) configuration ? */
944 hdr->cmd_general_data |=
945 ecore_vlan_mac_get_rx_tx_flag(o);
946
947 if (add)
948 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
949
950 hdr->cmd_general_data |=
951 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
952 }
953
954 /**
955 * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
956 *
957 * @cid: connection id
958 * @type: ECORE_FILTER_XXX_PENDING
959 * @hdr: pointer to header to setup
960 * @rule_cnt:
961 *
962 * currently we always configure one rule and echo field to contain a CID and an
963 * opcode type.
964 */
static inline void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type,
				struct eth_classify_header *hdr, int rule_cnt)
967 {
968 hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) |
969 (type << ECORE_SWCID_SHIFT));
970 hdr->rule_cnt = (uint8_t)rule_cnt;
971 }
972
973 /* hw_config() callbacks */
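/**
 * ecore_set_one_mac_e2 - fill a single MAC rule ramrod data (E2 and newer)
 *
 * @sc: device handle
 * @o: ecore_vlan_mac_obj
 * @elem: ecore_exeq_elem
 * @rule_idx: rule_idx
 * @cam_offset: cam_offset
 */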
static void ecore_set_one_mac_e2(struct bxe_softc *sc,
                                 struct ecore_vlan_mac_obj *o,
                                 struct ecore_exeq_elem *elem, int rule_idx,
                                 int cam_offset)
978 {
979 struct ecore_raw_obj *raw = &o->raw;
980 struct eth_classify_rules_ramrod_data *data =
981 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
982 int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
983 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
984 bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
985 unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
986 uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;
987
988 /* Set LLH CAM entry: currently only iSCSI and ETH macs are
989 * relevant. In addition, current implementation is tuned for a
990 * single ETH MAC.
991 *
992 * When multiple unicast ETH MACs PF configuration in switch
993 * independent mode is required (NetQ, multiple netdev MACs,
994 * etc.), consider better utilisation of 8 per function MAC
995 * entries in the LLH register. There is also
996 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
997 * total number of CAM entries to 16.
998 *
999 * Currently we won't configure NIG for MACs other than a primary ETH
1000 * MAC and iSCSI L2 MAC.
1001 *
1002 * If this MAC is moving from one Queue to another, no need to change
1003 * NIG configuration.
1004 */
1005 if (cmd != ECORE_VLAN_MAC_MOVE) {
1006 if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
1007 ecore_set_mac_in_nig(sc, add, mac,
1008 ECORE_LLH_CAM_ISCSI_ETH_LINE);
1009 else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
1010 ecore_set_mac_in_nig(sc, add, mac,
1011 ECORE_LLH_CAM_ETH_LINE);
1012 }
1013
1014 /* Reset the ramrod data buffer for the first rule */
1015 if (rule_idx == 0)
1016 ECORE_MEMSET(data, 0, sizeof(*data));
1017
1018 /* Setup a command header */
1019 ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_MAC,
1020 &rule_entry->mac.header);
1021
1022 ECORE_MSG(sc, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d\n",
1023 (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id);
1024
1025 /* Set a MAC itself */
1026 ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
1027 &rule_entry->mac.mac_mid,
1028 &rule_entry->mac.mac_lsb, mac);
1029 rule_entry->mac.inner_mac =
1030 ECORE_CPU_TO_LE16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
1031
1032 /* MOVE: Add a rule that will add this MAC to the target Queue */
1033 if (cmd == ECORE_VLAN_MAC_MOVE) {
1034 rule_entry++;
1035 rule_cnt++;
1036
1037 /* Setup ramrod data */
1038 ecore_vlan_mac_set_cmd_hdr_e2(sc,
1039 elem->cmd_data.vlan_mac.target_obj,
1040 TRUE, CLASSIFY_RULE_OPCODE_MAC,
1041 &rule_entry->mac.header);
1042
1043 /* Set a MAC itself */
1044 ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
1045 &rule_entry->mac.mac_mid,
1046 &rule_entry->mac.mac_lsb, mac);
1047 rule_entry->mac.inner_mac =
1048 ECORE_CPU_TO_LE16(elem->cmd_data.vlan_mac.
1049 u.mac.is_inner_mac);
1050 }
1051
1052 /* Set the ramrod data header */
1053 /* TODO: take this to the higher level in order to prevent multiple
1054 writing */
1055 ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1056 rule_cnt);
1057 }
1058
1059 /**
1060 * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
1061 *
1062 * @sc: device handle
1063 * @o: queue
1064 * @type:
1065 * @cam_offset: offset in cam memory
1066 * @hdr: pointer to a header to setup
1067 *
1068 * E1/E1H
1069 */
static inline void ecore_vlan_mac_set_rdata_hdr_e1x(struct bxe_softc *sc,
	struct ecore_vlan_mac_obj *o, int type, int cam_offset,
	struct mac_configuration_hdr *hdr)
1073 {
1074 struct ecore_raw_obj *r = &o->raw;
1075
1076 hdr->length = 1;
1077 hdr->offset = (uint8_t)cam_offset;
1078 hdr->client_id = ECORE_CPU_TO_LE16(0xff);
1079 hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
1080 (type << ECORE_SWCID_SHIFT));
1081 }
1082
static inline void ecore_vlan_mac_set_cfg_entry_e1x(struct bxe_softc *sc,
	struct ecore_vlan_mac_obj *o, bool add, int opcode, uint8_t *mac,
	uint16_t vlan_id, struct mac_configuration_entry *cfg_entry)
1086 {
1087 struct ecore_raw_obj *r = &o->raw;
1088 uint32_t cl_bit_vec = (1 << r->cl_id);
1089
1090 cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec);
1091 cfg_entry->pf_id = r->func_id;
1092 cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id);
1093
1094 if (add) {
1095 ECORE_SET_FLAG(cfg_entry->flags,
1096 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1097 T_ETH_MAC_COMMAND_SET);
1098 ECORE_SET_FLAG(cfg_entry->flags,
1099 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
1100 opcode);
1101
1102 /* Set a MAC in a ramrod data */
1103 ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
1104 &cfg_entry->middle_mac_addr,
1105 &cfg_entry->lsb_mac_addr, mac);
1106 } else
1107 ECORE_SET_FLAG(cfg_entry->flags,
1108 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
1109 T_ETH_MAC_COMMAND_INVALIDATE);
1110 }
1111
static inline void ecore_vlan_mac_set_rdata_e1x(struct bxe_softc *sc,
	struct ecore_vlan_mac_obj *o, int type, int cam_offset, bool add,
	uint8_t *mac, uint16_t vlan_id, int opcode, struct mac_configuration_cmd *config)
1115 {
1116 struct mac_configuration_entry *cfg_entry = &config->config_table[0];
1117 struct ecore_raw_obj *raw = &o->raw;
1118
1119 ecore_vlan_mac_set_rdata_hdr_e1x(sc, o, type, cam_offset,
1120 &config->hdr);
1121 ecore_vlan_mac_set_cfg_entry_e1x(sc, o, add, opcode, mac, vlan_id,
1122 cfg_entry);
1123
1124 ECORE_MSG(sc, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d\n",
1125 (add ? "setting" : "clearing"),
1126 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], raw->cl_id, cam_offset);
1127 }
1128
1129 /**
1130 * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
1131 *
1132 * @sc: device handle
1133 * @o: ecore_vlan_mac_obj
1134 * @elem: ecore_exeq_elem
1135 * @rule_idx: rule_idx
1136 * @cam_offset: cam_offset
1137 */
static void ecore_set_one_mac_e1x(struct bxe_softc *sc,
                                  struct ecore_vlan_mac_obj *o,
                                  struct ecore_exeq_elem *elem, int rule_idx,
                                  int cam_offset)
1142 {
1143 struct ecore_raw_obj *raw = &o->raw;
1144 struct mac_configuration_cmd *config =
1145 (struct mac_configuration_cmd *)(raw->rdata);
1146 /* 57710 and 57711 do not support MOVE command,
1147 * so it's either ADD or DEL
1148 */
1149 bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1150 TRUE : FALSE;
1151
1152 /* Reset the ramrod data buffer */
1153 ECORE_MEMSET(config, 0, sizeof(*config));
1154
1155 ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state,
1156 cam_offset, add,
1157 elem->cmd_data.vlan_mac.u.mac.mac, 0,
1158 ETH_VLAN_FILTER_ANY_VLAN, config);
1159 }
1160
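/**
 * ecore_set_one_vlan_e2 - fill a single VLAN rule ramrod data
 *
 * @sc: device handle
 * @o: ecore_vlan_mac_obj
 * @elem: ecore_exeq_elem
 * @rule_idx: rule_idx
 * @cam_offset: cam_offset
 */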
static void ecore_set_one_vlan_e2(struct bxe_softc *sc,
                                  struct ecore_vlan_mac_obj *o,
                                  struct ecore_exeq_elem *elem, int rule_idx,
                                  int cam_offset)
1165 {
1166 struct ecore_raw_obj *raw = &o->raw;
1167 struct eth_classify_rules_ramrod_data *data =
1168 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
1169 int rule_cnt = rule_idx + 1;
1170 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1171 enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1172 bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
1173 uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
1174
1175 /* Reset the ramrod data buffer for the first rule */
1176 if (rule_idx == 0)
1177 ECORE_MEMSET(data, 0, sizeof(*data));
1178
1179 /* Set a rule header */
1180 ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_VLAN,
1181 &rule_entry->vlan.header);
1182
1183 ECORE_MSG(sc, "About to %s VLAN %d\n", (add ? "add" : "delete"),
1184 vlan);
1185
1186 /* Set a VLAN itself */
1187 rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan);
1188
1189 /* MOVE: Add a rule that will add this MAC to the target Queue */
1190 if (cmd == ECORE_VLAN_MAC_MOVE) {
1191 rule_entry++;
1192 rule_cnt++;
1193
1194 /* Setup ramrod data */
1195 ecore_vlan_mac_set_cmd_hdr_e2(sc,
1196 elem->cmd_data.vlan_mac.target_obj,
1197 TRUE, CLASSIFY_RULE_OPCODE_VLAN,
1198 &rule_entry->vlan.header);
1199
1200 /* Set a VLAN itself */
1201 rule_entry->vlan.vlan = ECORE_CPU_TO_LE16(vlan);
1202 }
1203
1204 /* Set the ramrod data header */
1205 /* TODO: take this to the higher level in order to prevent multiple
1206 writing */
1207 ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1208 rule_cnt);
1209 }
1210
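/**
 * ecore_set_one_vlan_mac_e2 - fill a single VLAN-MAC pair rule ramrod data
 *
 * @sc: device handle
 * @o: ecore_vlan_mac_obj
 * @elem: ecore_exeq_elem
 * @rule_idx: rule_idx
 * @cam_offset: cam_offset
 */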
static void ecore_set_one_vlan_mac_e2(struct bxe_softc *sc,
                                      struct ecore_vlan_mac_obj *o,
                                      struct ecore_exeq_elem *elem,
                                      int rule_idx, int cam_offset)
1215 {
1216 struct ecore_raw_obj *raw = &o->raw;
1217 struct eth_classify_rules_ramrod_data *data =
1218 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
1219 int rule_cnt = rule_idx + 1;
1220 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1221 enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1222 bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
1223 uint16_t vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
1224 uint8_t *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
1225
1226 /* Reset the ramrod data buffer for the first rule */
1227 if (rule_idx == 0)
1228 ECORE_MEMSET(data, 0, sizeof(*data));
1229
1230 /* Set a rule header */
1231 ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add, CLASSIFY_RULE_OPCODE_PAIR,
1232 &rule_entry->pair.header);
1233
1234 /* Set VLAN and MAC themselves */
1235 rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan);
1236 ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
1237 &rule_entry->pair.mac_mid,
1238 &rule_entry->pair.mac_lsb, mac);
1239 rule_entry->pair.inner_mac =
1240 elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
1241 /* MOVE: Add a rule that will add this MAC to the target Queue */
1242 if (cmd == ECORE_VLAN_MAC_MOVE) {
1243 rule_entry++;
1244 rule_cnt++;
1245
1246 /* Setup ramrod data */
1247 ecore_vlan_mac_set_cmd_hdr_e2(sc,
1248 elem->cmd_data.vlan_mac.target_obj,
1249 TRUE, CLASSIFY_RULE_OPCODE_PAIR,
1250 &rule_entry->pair.header);
1251
1252 /* Set a VLAN itself */
1253 rule_entry->pair.vlan = ECORE_CPU_TO_LE16(vlan);
1254 ecore_set_fw_mac_addr(&rule_entry->pair.mac_msb,
1255 &rule_entry->pair.mac_mid,
1256 &rule_entry->pair.mac_lsb, mac);
1257 rule_entry->pair.inner_mac =
1258 elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
1259 }
1260
1261 /* Set the ramrod data header */
1262 /* TODO: take this to the higher level in order to prevent multiple
1263 writing */
1264 ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
1265 rule_cnt);
1266 }
1267
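/**
 * ecore_set_one_vxlan_fltr_e2 - fill a single inner-MAC/VNI rule ramrod data
 *
 * @sc: device handle
 * @o: ecore_vlan_mac_obj
 * @elem: ecore_exeq_elem
 * @rule_idx: rule_idx
 * @cam_offset: cam_offset
 */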
static void ecore_set_one_vxlan_fltr_e2(struct bxe_softc *sc,
                                        struct ecore_vlan_mac_obj *o,
                                        struct ecore_exeq_elem *elem,
                                        int rule_idx, int cam_offset)
1272 {
1273 struct ecore_raw_obj *raw = &o->raw;
1274 struct eth_classify_rules_ramrod_data *data =
1275 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
1276 int rule_cnt = rule_idx + 1;
1277 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
1278 enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1279 bool add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
1280 uint32_t vni = elem->cmd_data.vlan_mac.u.vxlan_fltr.vni;
1281 uint8_t *mac = elem->cmd_data.vlan_mac.u.vxlan_fltr.innermac;
1282
1283 /* Reset the ramrod data buffer for the first rule */
1284 if (rule_idx == 0)
1285 ECORE_MEMSET(data, 0, sizeof(*data));
1286
1287 /* Set a rule header */
1288 ecore_vlan_mac_set_cmd_hdr_e2(sc, o, add,
1289 CLASSIFY_RULE_OPCODE_IMAC_VNI,
1290 &rule_entry->imac_vni.header);
1291
1292 /* Set VLAN and MAC themselves */
1293 rule_entry->imac_vni.vni = vni;
1294 ecore_set_fw_mac_addr(&rule_entry->imac_vni.imac_msb,
1295 &rule_entry->imac_vni.imac_mid,
1296 &rule_entry->imac_vni.imac_lsb, mac);
1297
1298 /* MOVE: Add a rule that will add this MAC to the target Queue */
1299 if (cmd == ECORE_VLAN_MAC_MOVE) {
1300 rule_entry++;
1301 rule_cnt++;
1302
1303 /* Setup ramrod data */
1304 ecore_vlan_mac_set_cmd_hdr_e2(sc,
1305 elem->cmd_data.vlan_mac.target_obj,
1306 TRUE, CLASSIFY_RULE_OPCODE_IMAC_VNI,
1307 &rule_entry->imac_vni.header);
1308
1309 /* Set a VLAN itself */
1310 rule_entry->imac_vni.vni = vni;
1311 ecore_set_fw_mac_addr(&rule_entry->imac_vni.imac_msb,
1312 &rule_entry->imac_vni.imac_mid,
1313 &rule_entry->imac_vni.imac_lsb, mac);
1314 }
1315
1316 /* Set the ramrod data header */
1317 /* TODO: take this to the higher level in order to prevent multiple
1318 * writing
1319 */
1320 ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state,
1321 &data->header, rule_cnt);
1322 }
1323
/**
 * ecore_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
 *
 * @sc: device handle
 * @o: ecore_vlan_mac_obj
 * @elem: ecore_exeq_elem
 * @rule_idx: rule_idx
 * @cam_offset: cam_offset
 */
static void ecore_set_one_vlan_mac_e1h(struct bxe_softc *sc,
                                       struct ecore_vlan_mac_obj *o,
                                       struct ecore_exeq_elem *elem,
                                       int rule_idx, int cam_offset)
1337 {
1338 struct ecore_raw_obj *raw = &o->raw;
1339 struct mac_configuration_cmd *config =
1340 (struct mac_configuration_cmd *)(raw->rdata);
1341 /* 57710 and 57711 do not support MOVE command,
1342 * so it's either ADD or DEL
1343 */
1344 bool add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1345 TRUE : FALSE;
1346
1347 /* Reset the ramrod data buffer */
1348 ECORE_MEMSET(config, 0, sizeof(*config));
1349
1350 ecore_vlan_mac_set_rdata_e1x(sc, o, ECORE_FILTER_VLAN_MAC_PENDING,
1351 cam_offset, add,
1352 elem->cmd_data.vlan_mac.u.vlan_mac.mac,
1353 elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
1354 ETH_VLAN_FILTER_CLASSIFY, config);
1355 }
1356
1357 #define list_next_entry(pos, member) \
1358 list_entry((pos)->member.next, typeof(*(pos)), member)
1359
1360 /**
1361 * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
1362 *
1363 * @sc: device handle
1364 * @p: command parameters
1365 * @ppos: pointer to the cookie
1366 *
1367 * reconfigure next MAC/VLAN/VLAN-MAC element from the
1368 * previously configured elements list.
1369 *
1370 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
1371 * into an account
1372 *
1373 * pointer to the cookie - that should be given back in the next call to make
1374 * function handle the next element. If *ppos is set to NULL it will restart the
1375 * iterator. If returned *ppos == NULL this means that the last element has been
1376 * handled.
1377 *
1378 */
static int ecore_vlan_mac_restore(struct bxe_softc *sc,
                                  struct ecore_vlan_mac_ramrod_params *p,
                                  struct ecore_vlan_mac_registry_elem **ppos)
1382 {
1383 struct ecore_vlan_mac_registry_elem *pos;
1384 struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1385
1386 /* If list is empty - there is nothing to do here */
1387 if (ECORE_LIST_IS_EMPTY(&o->head)) {
1388 *ppos = NULL;
1389 return 0;
1390 }
1391
1392 /* make a step... */
1393 if (*ppos == NULL)
1394 *ppos = ECORE_LIST_FIRST_ENTRY(&o->head,
1395 struct ecore_vlan_mac_registry_elem,
1396 link);
1397 else
1398 *ppos = ECORE_LIST_NEXT(*ppos, link,
1399 struct ecore_vlan_mac_registry_elem);
1400
1401 pos = *ppos;
1402
1403 /* If it's the last step - return NULL */
1404 if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
1405 *ppos = NULL;
1406
1407 /* Prepare a 'user_req' */
1408 ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u));
1409
1410 /* Set the command */
1411 p->user_req.cmd = ECORE_VLAN_MAC_ADD;
1412
1413 /* Set vlan_mac_flags */
1414 p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1415
1416 /* Set a restore bit */
1417 ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);
1418
1419 return ecore_config_vlan_mac(sc, p);
1420 }
1421
1422 /* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
1423 * pointer to an element with a specific criteria and NULL if such an element
1424 * hasn't been found.
1425 */
static struct ecore_exeq_elem *ecore_exeq_get_mac(
	struct ecore_exe_queue_obj *o,
	struct ecore_exeq_elem *elem)
1429 {
1430 struct ecore_exeq_elem *pos;
1431 struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1432
1433 /* Check pending for execution commands */
1434 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1435 struct ecore_exeq_elem)
1436 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data,
1437 sizeof(*data)) &&
1438 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1439 return pos;
1440
1441 return NULL;
1442 }
1443
static struct ecore_exeq_elem *ecore_exeq_get_vlan(
	struct ecore_exe_queue_obj *o,
	struct ecore_exeq_elem *elem)
1447 {
1448 struct ecore_exeq_elem *pos;
1449 struct ecore_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1450
1451 /* Check pending for execution commands */
1452 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1453 struct ecore_exeq_elem)
1454 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan, data,
1455 sizeof(*data)) &&
1456 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1457 return pos;
1458
1459 return NULL;
1460 }
1461
static struct ecore_exeq_elem *ecore_exeq_get_vlan_mac(
	struct ecore_exe_queue_obj *o,
	struct ecore_exeq_elem *elem)
1465 {
1466 struct ecore_exeq_elem *pos;
1467 struct ecore_vlan_mac_ramrod_data *data =
1468 &elem->cmd_data.vlan_mac.u.vlan_mac;
1469
1470 /* Check pending for execution commands */
1471 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1472 struct ecore_exeq_elem)
1473 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1474 sizeof(*data)) &&
1475 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1476 return pos;
1477
1478 return NULL;
1479 }
1480
static struct ecore_exeq_elem *ecore_exeq_get_vxlan_fltr
	(struct ecore_exe_queue_obj *o,
	 struct ecore_exeq_elem *elem)
1484 {
1485 struct ecore_exeq_elem *pos;
1486 struct ecore_vxlan_fltr_ramrod_data *data =
1487 &elem->cmd_data.vlan_mac.u.vxlan_fltr;
1488
1489 /* Check pending for execution commands */
1490 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
1491 struct ecore_exeq_elem)
1492 if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.vxlan_fltr, data,
1493 sizeof(*data)) &&
1494 (pos->cmd_data.vlan_mac.cmd ==
1495 elem->cmd_data.vlan_mac.cmd))
1496 return pos;
1497
1498 return NULL;
1499 }
1500
1501 /**
1502 * ecore_validate_vlan_mac_add - check if an ADD command can be executed
1503 *
1504 * @sc: device handle
1505 * @qo: ecore_qable_obj
1506 * @elem: ecore_exeq_elem
1507 *
1508 * Checks that the requested configuration can be added. If yes and if
1509 * requested, consume CAM credit.
1510 *
1511 * The 'validate' is run after the 'optimize'.
1512 *
1513 */
static inline int ecore_validate_vlan_mac_add(struct bxe_softc *sc,
                                              union ecore_qable_obj *qo,
                                              struct ecore_exeq_elem *elem)
1517 {
1518 struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1519 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1520 int rc;
1521
1522 /* Check the registry */
1523 rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
1524 if (rc) {
1525 ECORE_MSG(sc, "ADD command is not allowed considering current registry state.\n");
1526 return rc;
1527 }
1528
1529 /* Check if there is a pending ADD command for this
1530 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1531 */
1532 if (exeq->get(exeq, elem)) {
1533 ECORE_MSG(sc, "There is a pending ADD command already\n");
1534 return ECORE_EXISTS;
1535 }
1536
1537 /* TODO: Check the pending MOVE from other objects where this
1538 * object is a destination object.
1539 */
1540
1541 /* Consume the credit if not requested not to */
1542 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1543 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1544 o->get_credit(o)))
1545 return ECORE_INVAL;
1546
1547 return ECORE_SUCCESS;
1548 }
1549
1550 /**
1551 * ecore_validate_vlan_mac_del - check if the DEL command can be executed
1552 *
1553 * @sc: device handle
1554 * @qo: quable object to check
1555 * @elem: element that needs to be deleted
1556 *
1557 * Checks that the requested configuration can be deleted. If yes and if
1558 * requested, returns a CAM credit.
1559 *
1560 * The 'validate' is run after the 'optimize'.
1561 */
static inline int ecore_validate_vlan_mac_del(struct bxe_softc *sc,
                                              union ecore_qable_obj *qo,
                                              struct ecore_exeq_elem *elem)
1565 {
1566 struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1567 struct ecore_vlan_mac_registry_elem *pos;
1568 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1569 struct ecore_exeq_elem query_elem;
1570
1571 /* If this classification can not be deleted (doesn't exist)
1572 * - return a ECORE_EXIST.
1573 */
1574 pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1575 if (!pos) {
1576 ECORE_MSG(sc, "DEL command is not allowed considering current registry state\n");
1577 return ECORE_EXISTS;
1578 }
1579
1580 /* Check if there are pending DEL or MOVE commands for this
1581 * MAC/VLAN/VLAN-MAC. Return an error if so.
1582 */
1583 ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
1584
1585 /* Check for MOVE commands */
1586 query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
1587 if (exeq->get(exeq, &query_elem)) {
1588 ECORE_ERR("There is a pending MOVE command already\n");
1589 return ECORE_INVAL;
1590 }
1591
1592 /* Check for DEL commands */
1593 if (exeq->get(exeq, elem)) {
1594 ECORE_MSG(sc, "There is a pending DEL command already\n");
1595 return ECORE_EXISTS;
1596 }
1597
1598 /* Return the credit to the credit pool if not requested not to */
1599 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1600 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1601 o->put_credit(o))) {
1602 ECORE_ERR("Failed to return a credit\n");
1603 return ECORE_INVAL;
1604 }
1605
1606 return ECORE_SUCCESS;
1607 }
1608
1609 /**
1610 * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
1611 *
1612 * @sc: device handle
1613 * @qo: qable object to check (source)
1614 * @elem: element that needs to be moved
1615 *
1616 * Checks that the requested configuration can be moved. If yes and if
1617 * requested, returns a CAM credit.
1618 *
1619 * The 'validate' is run after the 'optimize'.
1620 */
1621 static inline int ecore_validate_vlan_mac_move(struct bxe_softc *sc,
1622 union ecore_qable_obj *qo,
1623 struct ecore_exeq_elem *elem)
1624 {
1625 struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
1626 struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1627 struct ecore_exeq_elem query_elem;
1628 struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
1629 struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1630
1631 /* Check if we can perform this operation based on the current registry
1632 * state.
1633 */
1634 if (!src_o->check_move(sc, src_o, dest_o,
1635 &elem->cmd_data.vlan_mac.u)) {
1636 ECORE_MSG(sc, "MOVE command is not allowed considering current registry state\n");
1637 return ECORE_INVAL;
1638 }
1639
1640 /* Check if there is an already pending DEL or MOVE command for the
1641 * source object or ADD command for a destination object. Return an
1642 * error if so.
1643 */
1644 ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));
1645
1646 /* Check DEL on source */
1647 query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1648 if (src_exeq->get(src_exeq, &query_elem)) {
1649 ECORE_ERR("There is a pending DEL command on the source queue already\n");
1650 return ECORE_INVAL;
1651 }
1652
1653 /* Check MOVE on source */
1654 if (src_exeq->get(src_exeq, elem)) {
1655 ECORE_MSG(sc, "There is a pending MOVE command already\n");
1656 return ECORE_EXISTS;
1657 }
1658
1659 /* Check ADD on destination */
1660 query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1661 if (dest_exeq->get(dest_exeq, &query_elem)) {
1662 ECORE_ERR("There is a pending ADD command on the destination queue already\n");
1663 return ECORE_INVAL;
1664 }
1665
1666 /* Consume the destination credit unless requested not to */
1667 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
1668 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1669 dest_o->get_credit(dest_o)))
1670 return ECORE_INVAL;
1671
1672 if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1673 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1674 src_o->put_credit(src_o))) {
1675 /* return the credit taken from dest... */
1676 dest_o->put_credit(dest_o);
1677 return ECORE_INVAL;
1678 }
1679
1680 return ECORE_SUCCESS;
1681 }
1682
1683 static int ecore_validate_vlan_mac(struct bxe_softc *sc,
1684 union ecore_qable_obj *qo,
1685 struct ecore_exeq_elem *elem)
1686 {
1687 switch (elem->cmd_data.vlan_mac.cmd) {
1688 case ECORE_VLAN_MAC_ADD:
1689 return ecore_validate_vlan_mac_add(sc, qo, elem);
1690 case ECORE_VLAN_MAC_DEL:
1691 return ecore_validate_vlan_mac_del(sc, qo, elem);
1692 case ECORE_VLAN_MAC_MOVE:
1693 return ecore_validate_vlan_mac_move(sc, qo, elem);
1694 default:
1695 return ECORE_INVAL;
1696 }
1697 }
1698
1699 static int ecore_remove_vlan_mac(struct bxe_softc *sc,
1700 union ecore_qable_obj *qo,
1701 struct ecore_exeq_elem *elem)
1702 {
1703 int rc = 0;
1704
1705 /* If consumption wasn't required, nothing to do */
1706 if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1707 &elem->cmd_data.vlan_mac.vlan_mac_flags))
1708 return ECORE_SUCCESS;
1709
1710 switch (elem->cmd_data.vlan_mac.cmd) {
1711 case ECORE_VLAN_MAC_ADD:
1712 case ECORE_VLAN_MAC_MOVE:
1713 rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
1714 break;
1715 case ECORE_VLAN_MAC_DEL:
1716 rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
1717 break;
1718 default:
1719 return ECORE_INVAL;
1720 }
1721
1722 if (rc != TRUE)
1723 return ECORE_INVAL;
1724
1725 return ECORE_SUCCESS;
1726 }
1727
1728 /**
1729 * ecore_wait_vlan_mac - passively wait up to ~5 seconds until all work completes.
1730 *
1731 * @sc: device handle
1732 * @o: ecore_vlan_mac_obj
1733 *
1734 */
1735 static int ecore_wait_vlan_mac(struct bxe_softc *sc,
1736 struct ecore_vlan_mac_obj *o)
1737 {
1738 int cnt = 5000, rc;
1739 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1740 struct ecore_raw_obj *raw = &o->raw;
1741
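/* Up to 5000 polls with ~1 ms sleeps below bound this passive wait to
 * roughly the 5 seconds promised in the comment above (in addition to the
 * per-command completion waits done by raw->wait_comp()).
 */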
1742 while (cnt--) {
1743 /* Wait for the current command to complete */
1744 rc = raw->wait_comp(sc, raw);
1745 if (rc)
1746 return rc;
1747
1748 /* Wait until there are no pending commands */
1749 if (!ecore_exe_queue_empty(exeq))
1750 ECORE_WAIT(sc, 1000);
1751 else
1752 return ECORE_SUCCESS;
1753 }
1754
1755 return ECORE_TIMEOUT;
1756 }
1757
1758 static int __ecore_vlan_mac_execute_step(struct bxe_softc *sc,
1759 struct ecore_vlan_mac_obj *o,
1760 unsigned long *ramrod_flags)
1761 {
1762 int rc = ECORE_SUCCESS;
1763
1764 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
1765
1766 ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock\n");
1767 rc = __ecore_vlan_mac_h_write_trylock(sc, o);
1768
1769 if (rc != ECORE_SUCCESS) {
1770 __ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);
1771
1772 /* The calling function should not differentiate between this case
1773 * and the case in which there is already a pending ramrod
1774 */
1775 rc = ECORE_PENDING;
1776 } else {
1777 rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
1778 }
1779 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
1780
1781 return rc;
1782 }
1783
1784 /**
1785 * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
1786 *
1787 * @sc: device handle
1788 * @o: ecore_vlan_mac_obj
1789 * @cqe: completion element from the event ring
1790 * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk
1791 *
1792 */
1793 static int ecore_complete_vlan_mac(struct bxe_softc *sc,
1794 struct ecore_vlan_mac_obj *o,
1795 union event_ring_elem *cqe,
1796 unsigned long *ramrod_flags)
1797 {
1798 struct ecore_raw_obj *r = &o->raw;
1799 int rc;
1800
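/* Typically reached via o->complete() from the slowpath event queue
 * handling once the classification ramrod completion (the CQE below)
 * has arrived.
 */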
1801 /* Clearing the pending list & raw state should be made
1802 * atomically (as execution flow assumes they represent the same)
1803 */
1804 ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
1805
1806 /* Reset pending list */
1807 __ecore_exe_queue_reset_pending(sc, &o->exe_queue);
1808
1809 /* Clear pending */
1810 r->clear_pending(r);
1811
1812 ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
1813
1814 /* If ramrod failed this is most likely a SW bug */
1815 if (cqe->message.error)
1816 return ECORE_INVAL;
1817
1818 /* Run the next bulk of pending commands if requested */
1819 if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
1820 rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
1821 if (rc < 0)
1822 return rc;
1823 }
1824
1825 /* If there is more work to do return PENDING */
1826 if (!ecore_exe_queue_empty(&o->exe_queue))
1827 return ECORE_PENDING;
1828
1829 return ECORE_SUCCESS;
1830 }
1831
1832 /**
1833 * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
1834 *
1835 * @sc: device handle
1836 * @qo: ecore_qable_obj
1837 * @elem: ecore_exeq_elem
1838 */
1839 static int ecore_optimize_vlan_mac(struct bxe_softc *sc,
1840 union ecore_qable_obj *qo,
1841 struct ecore_exeq_elem *elem)
1842 {
1843 struct ecore_exeq_elem query, *pos;
1844 struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1845 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1846
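/* An ADD is matched against a pending DEL of the same MAC/VLAN (and vice
 * versa). If the opposite command is still queued it is removed here and
 * its CAM credit accounting is rolled back; the caller then drops the new
 * command as well, so neither ever reaches the FW.
 */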
1847 ECORE_MEMCPY(&query, elem, sizeof(query));
1848
1849 switch (elem->cmd_data.vlan_mac.cmd) {
1850 case ECORE_VLAN_MAC_ADD:
1851 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1852 break;
1853 case ECORE_VLAN_MAC_DEL:
1854 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1855 break;
1856 default:
1857 /* Don't handle anything other than ADD or DEL */
1858 return 0;
1859 }
1860
1861 /* If we found the appropriate element - delete it */
1862 pos = exeq->get(exeq, &query);
1863 if (pos) {
1864
1865 /* Return the credit of the optimized command */
1866 if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1867 &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1868 if ((query.cmd_data.vlan_mac.cmd ==
1869 ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
1870 ECORE_ERR("Failed to return the credit for the optimized ADD command\n");
1871 return ECORE_INVAL;
1872 } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1873 ECORE_ERR("Failed to recover the credit from the optimized DEL command\n");
1874 return ECORE_INVAL;
1875 }
1876 }
1877
1878 ECORE_MSG(sc, "Optimizing %s command\n",
1879 (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1880 "ADD" : "DEL");
1881
1882 ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
1883 ecore_exe_queue_free_elem(sc, pos);
1884 return 1;
1885 }
1886
1887 return 0;
1888 }
1889
1890 /**
1891 * ecore_vlan_mac_get_registry_elem - prepare a registry element
1892 *
1893 * @sc: device handle
1894 * @o: vlan_mac object the command is executed on
1895 * @elem: execution queue element holding the command
1896 * @restore: TRUE if this is a RESTORE flow (the registry element already exists)
1897 * @re: output - the prepared registry element
1898 *
1899 * prepare a registry element according to the current command request.
1900 */
1901 static inline int ecore_vlan_mac_get_registry_elem(
1902 struct bxe_softc *sc,
1903 struct ecore_vlan_mac_obj *o,
1904 struct ecore_exeq_elem *elem,
1905 bool restore,
1906 struct ecore_vlan_mac_registry_elem **re)
1907 {
1908 enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1909 struct ecore_vlan_mac_registry_elem *reg_elem;
1910
1911 /* Allocate a new registry element if needed. */
1912 if (!restore &&
1913 ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
1914 reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
1915 if (!reg_elem)
1916 return ECORE_NOMEM;
1917
1918 /* Get a new CAM offset */
1919 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1920 /* This shall never happen, because we have checked the
1921 * CAM availability in the 'validate'.
1922 */
1923 ECORE_DBG_BREAK_IF(1);
1924 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1925 return ECORE_INVAL;
1926 }
1927
1928 ECORE_MSG(sc, "Got cam offset %d\n", reg_elem->cam_offset);
1929
1930 /* Set a VLAN-MAC data */
1931 ECORE_MEMCPY(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1932 sizeof(reg_elem->u));
1933
1934 /* Copy the flags (needed for DEL and RESTORE flows) */
1935 reg_elem->vlan_mac_flags =
1936 elem->cmd_data.vlan_mac.vlan_mac_flags;
1937 } else /* DEL, RESTORE */
1938 reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1939
1940 *re = reg_elem;
1941 return ECORE_SUCCESS;
1942 }
1943
1944 /**
1945 * ecore_execute_vlan_mac - execute vlan mac command
1946 *
1947 * @sc: device handle
1948 * @qo: qable object (its vlan_mac member is used)
1949 * @exe_chunk: list of commands to be executed in this ramrod
1950 * @ramrod_flags: RAMROD_XXX execution flags
1951 *
1952 * go and send a ramrod!
1953 */
1954 static int ecore_execute_vlan_mac(struct bxe_softc *sc,
1955 union ecore_qable_obj *qo,
1956 ecore_list_t *exe_chunk,
1957 unsigned long *ramrod_flags)
1958 {
1959 struct ecore_exeq_elem *elem;
1960 struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1961 struct ecore_raw_obj *r = &o->raw;
1962 int rc, idx = 0;
1963 bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
1964 bool drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1965 struct ecore_vlan_mac_registry_elem *reg_elem;
1966 enum ecore_vlan_mac_cmd cmd;
1967
1968 /* If DRIVER_ONLY execution is requested, cleanup a registry
1969 * and exit. Otherwise send a ramrod to FW.
1970 */
1971 if (!drv_only) {
1972 ECORE_DBG_BREAK_IF(r->check_pending(r));
1973
1974 /* Set pending */
1975 r->set_pending(r);
1976
1977 /* Fill the ramrod data */
1978 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1979 struct ecore_exeq_elem) {
1980 cmd = elem->cmd_data.vlan_mac.cmd;
1981 /* We will add to the target object in MOVE command, so
1982 * change the object for a CAM search.
1983 */
1984 if (cmd == ECORE_VLAN_MAC_MOVE)
1985 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1986 else
1987 cam_obj = o;
1988
1989 rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
1990 elem, restore,
1991 &reg_elem);
1992 if (rc)
1993 goto error_exit;
1994
1995 ECORE_DBG_BREAK_IF(!reg_elem);
1996
1997 /* Push a new entry into the registry */
1998 if (!restore &&
1999 ((cmd == ECORE_VLAN_MAC_ADD) ||
2000 (cmd == ECORE_VLAN_MAC_MOVE)))
2001 ECORE_LIST_PUSH_HEAD(&reg_elem->link,
2002 &cam_obj->head);
2003
2004 /* Configure a single command in a ramrod data buffer */
2005 o->set_one_rule(sc, o, elem, idx,
2006 reg_elem->cam_offset);
2007
2008 /* MOVE command consumes 2 entries in the ramrod data */
2009 if (cmd == ECORE_VLAN_MAC_MOVE)
2010 idx += 2;
2011 else
2012 idx++;
2013 }
2014
2015 /* No need for an explicit memory barrier here as long as we
2016 * ensure the ordering of writing to the SPQ element
2017 * and updating of the SPQ producer which involves a memory
2018 * read. If the memory read is removed we will have to put a
2019 * full memory barrier there (inside ecore_sp_post()).
2020 */
2021 rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
2022 r->rdata_mapping,
2023 ETH_CONNECTION_TYPE);
2024 if (rc)
2025 goto error_exit;
2026 }
2027
2028 /* Now, when we are done with the ramrod - clean up the registry */
2029 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
2030 struct ecore_exeq_elem) {
2031 cmd = elem->cmd_data.vlan_mac.cmd;
2032 if ((cmd == ECORE_VLAN_MAC_DEL) ||
2033 (cmd == ECORE_VLAN_MAC_MOVE)) {
2034 reg_elem = o->check_del(sc, o,
2035 &elem->cmd_data.vlan_mac.u);
2036
2037 ECORE_DBG_BREAK_IF(!reg_elem);
2038
2039 o->put_cam_offset(o, reg_elem->cam_offset);
2040 ECORE_LIST_REMOVE_ENTRY(&reg_elem->link, &o->head);
2041 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
2042 }
2043 }
2044
2045 if (!drv_only)
2046 return ECORE_PENDING;
2047 else
2048 return ECORE_SUCCESS;
2049
2050 error_exit:
2051 r->clear_pending(r);
2052
2053 /* Cleanup a registry in case of a failure */
2054 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
2055 struct ecore_exeq_elem) {
2056 cmd = elem->cmd_data.vlan_mac.cmd;
2057
2058 if (cmd == ECORE_VLAN_MAC_MOVE)
2059 cam_obj = elem->cmd_data.vlan_mac.target_obj;
2060 else
2061 cam_obj = o;
2062
2063 /* Delete all newly added above entries */
2064 if (!restore &&
2065 ((cmd == ECORE_VLAN_MAC_ADD) ||
2066 (cmd == ECORE_VLAN_MAC_MOVE))) {
2067 reg_elem = o->check_del(sc, cam_obj,
2068 &elem->cmd_data.vlan_mac.u);
2069 if (reg_elem) {
2070 ECORE_LIST_REMOVE_ENTRY(&reg_elem->link,
2071 &cam_obj->head);
2072 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
2073 }
2074 }
2075 }
2076
2077 return rc;
2078 }
2079
2080 static inline int ecore_vlan_mac_push_new_cmd(
2081 struct bxe_softc *sc,
2082 struct ecore_vlan_mac_ramrod_params *p)
2083 {
2084 struct ecore_exeq_elem *elem;
2085 struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
2086 bool restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);
2087
2088 /* Allocate the execution queue element */
2089 elem = ecore_exe_queue_alloc_elem(sc);
2090 if (!elem)
2091 return ECORE_NOMEM;
2092
2093 /* Set the command 'length' */
2094 switch (p->user_req.cmd) {
2095 case ECORE_VLAN_MAC_MOVE:
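/* A MOVE occupies two rules in the ramrod data: removal from the source
 * object plus addition to the destination (see the idx accounting in
 * ecore_execute_vlan_mac()).
 */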
2096 elem->cmd_len = 2;
2097 break;
2098 default:
2099 elem->cmd_len = 1;
2100 }
2101
2102 /* Fill the object specific info */
2103 ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
2104
2105 /* Try to add a new command to the pending list */
2106 return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
2107 }
2108
2109 /**
2110 * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
2111 *
2112 * @sc: device handle
2113 * @p: vlan_mac ramrod parameters (command, flags and object)
2114 *
2115 */
2116 int ecore_config_vlan_mac(struct bxe_softc *sc,
2117 struct ecore_vlan_mac_ramrod_params *p)
2118 {
2119 int rc = ECORE_SUCCESS;
2120 struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
2121 unsigned long *ramrod_flags = &p->ramrod_flags;
2122 bool cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
2123 struct ecore_raw_obj *raw = &o->raw;
2124
2125 /*
2126 * Add new elements to the execution list for commands that require it.
2127 */
2128 if (!cont) {
2129 rc = ecore_vlan_mac_push_new_cmd(sc, p);
2130 if (rc)
2131 return rc;
2132 }
2133
2134 /* If nothing will be executed further in this iteration we want to
2135 * return PENDING if there are pending commands
2136 */
2137 if (!ecore_exe_queue_empty(&o->exe_queue))
2138 rc = ECORE_PENDING;
2139
2140 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
2141 ECORE_MSG(sc, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
2142 raw->clear_pending(raw);
2143 }
2144
2145 /* Execute commands if required */
2146 if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
2147 ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
2148 rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
2149 &p->ramrod_flags);
2150 if (rc < 0)
2151 return rc;
2152 }
2153
2154 /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
2155 * then the user wants to wait until the last command is done.
2156 */
2157 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2158 /* Wait for at most the current exe_queue length plus one
2159 * iterations (the extra one is for the currently pending command).
2160 */
2161 int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;
2162
2163 while (!ecore_exe_queue_empty(&o->exe_queue) &&
2164 max_iterations--) {
2165
2166 /* Wait for the current command to complete */
2167 rc = raw->wait_comp(sc, raw);
2168 if (rc)
2169 return rc;
2170
2171 /* Make a next step */
2172 rc = __ecore_vlan_mac_execute_step(sc,
2173 p->vlan_mac_obj,
2174 &p->ramrod_flags);
2175 if (rc < 0)
2176 return rc;
2177 }
2178
2179 return ECORE_SUCCESS;
2180 }
2181
2182 return rc;
2183 }
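
/* Usage note for ecore_config_vlan_mac() above: a minimal, illustrative
 * caller that adds a single MAC and waits for completion might look roughly
 * like the sketch below. The object pointer and MAC buffer are hypothetical;
 * the field and flag names follow the flows in this file.
 *
 *	struct ecore_vlan_mac_ramrod_params p;
 *
 *	ECORE_MEMSET(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = ECORE_VLAN_MAC_ADD;
 *	ECORE_MEMCPY(&p.user_req.u.mac.mac, mac_addr, ETH_ALEN);
 *	ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = ecore_config_vlan_mac(sc, &p);
 */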
2184
2185 /**
2186 * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
2187 *
2188 * @sc: device handle
2189 * @o: vlan_mac object to delete the elements from
2190 * @vlan_mac_flags: flags identifying which entries to delete
2191 * @ramrod_flags: execution flags to be used for this deletion
2192 *
2193 * Returns zero if the last operation has completed successfully and there
2194 * are no more elements left, a positive value if the last operation has
2195 * completed successfully and there are more previously configured elements,
2196 * or a negative value if the current operation has failed.
2197 */
2198 static int ecore_vlan_mac_del_all(struct bxe_softc *sc,
2199 struct ecore_vlan_mac_obj *o,
2200 unsigned long *vlan_mac_flags,
2201 unsigned long *ramrod_flags)
2202 {
2203 struct ecore_vlan_mac_registry_elem *pos = NULL;
2204 struct ecore_vlan_mac_ramrod_params p;
2205 struct ecore_exe_queue_obj *exeq = &o->exe_queue;
2206 struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
2207 unsigned long flags;
2208 int read_lock;
2209 int rc = 0;
2210
2211 /* Clear pending commands first */
2212
2213 ECORE_SPIN_LOCK_BH(&exeq->lock);
2214
2215 ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
2216 &exeq->exe_queue, link,
2217 struct ecore_exeq_elem) {
2218 flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
2219 if (ECORE_VLAN_MAC_CMP_FLAGS(flags) ==
2220 ECORE_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
2221 rc = exeq->remove(sc, exeq->owner, exeq_pos);
2222 if (rc) {
2223 ECORE_ERR("Failed to remove command\n");
2224 ECORE_SPIN_UNLOCK_BH(&exeq->lock);
2225 return rc;
2226 }
2227 ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
2228 &exeq->exe_queue);
2229 ecore_exe_queue_free_elem(sc, exeq_pos);
2230 }
2231 }
2232
2233 ECORE_SPIN_UNLOCK_BH(&exeq->lock);
2234
2235 /* Prepare a command request */
2236 ECORE_MEMSET(&p, 0, sizeof(p));
2237 p.vlan_mac_obj = o;
2238 p.ramrod_flags = *ramrod_flags;
2239 p.user_req.cmd = ECORE_VLAN_MAC_DEL;
2240
2241 /* Queue a DEL command for every matching VLAN-MAC on the execution queue
2242 * without actually executing anything yet.
2243 */
2244 ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
2245 ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
2246 ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
2247
2248 ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
2249 read_lock = ecore_vlan_mac_h_read_lock(sc, o);
2250 if (read_lock != ECORE_SUCCESS)
2251 return read_lock;
2252
2253 ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
2254 struct ecore_vlan_mac_registry_elem) {
2255 flags = pos->vlan_mac_flags;
2256 if (ECORE_VLAN_MAC_CMP_FLAGS(flags) ==
2257 ECORE_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
2258 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
2259 ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
2260 rc = ecore_config_vlan_mac(sc, &p);
2261 if (rc < 0) {
2262 ECORE_ERR("Failed to add a new DEL command\n");
2263 ecore_vlan_mac_h_read_unlock(sc, o);
2264 return rc;
2265 }
2266 }
2267 }
2268
2269 ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
2270 ecore_vlan_mac_h_read_unlock(sc, o);
2271
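/* The final call below runs with the caller's original ramrod flags plus
 * RAMROD_CONT, so no new command is queued and the DEL commands queued
 * above are actually executed.
 */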
2272 p.ramrod_flags = *ramrod_flags;
2273 ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
2274
2275 return ecore_config_vlan_mac(sc, &p);
2276 }
2277
2278 static inline void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id,
2279 uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping, int state,
2280 unsigned long *pstate, ecore_obj_type type)
2281 {
2282 raw->func_id = func_id;
2283 raw->cid = cid;
2284 raw->cl_id = cl_id;
2285 raw->rdata = rdata;
2286 raw->rdata_mapping = rdata_mapping;
2287 raw->state = state;
2288 raw->pstate = pstate;
2289 raw->obj_type = type;
2290 raw->check_pending = ecore_raw_check_pending;
2291 raw->clear_pending = ecore_raw_clear_pending;
2292 raw->set_pending = ecore_raw_set_pending;
2293 raw->wait_comp = ecore_raw_wait;
2294 }
2295
2296 static inline void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
2297 uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata, ecore_dma_addr_t rdata_mapping,
2298 int state, unsigned long *pstate, ecore_obj_type type,
2299 struct ecore_credit_pool_obj *macs_pool,
2300 struct ecore_credit_pool_obj *vlans_pool)
2301 {
2302 ECORE_LIST_INIT(&o->head);
2303 o->head_reader = 0;
2304 o->head_exe_request = FALSE;
2305 o->saved_ramrod_flags = 0;
2306
2307 o->macs_pool = macs_pool;
2308 o->vlans_pool = vlans_pool;
2309
2310 o->delete_all = ecore_vlan_mac_del_all;
2311 o->restore = ecore_vlan_mac_restore;
2312 o->complete = ecore_complete_vlan_mac;
2313 o->wait = ecore_wait_vlan_mac;
2314
2315 ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
2316 state, pstate, type);
2317 }
2318
2319 void ecore_init_mac_obj(struct bxe_softc *sc,
2320 struct ecore_vlan_mac_obj *mac_obj,
2321 uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2322 ecore_dma_addr_t rdata_mapping, int state,
2323 unsigned long *pstate, ecore_obj_type type,
2324 struct ecore_credit_pool_obj *macs_pool)
2325 {
2326 union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;
2327
2328 ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
2329 rdata_mapping, state, pstate, type,
2330 macs_pool, NULL);
2331
2332 /* CAM credit pool handling */
2333 mac_obj->get_credit = ecore_get_credit_mac;
2334 mac_obj->put_credit = ecore_put_credit_mac;
2335 mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
2336 mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
2337
2338 if (CHIP_IS_E1x(sc)) {
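/* 57710/57711 (E1x): the legacy SET_MAC ramrod carries a single CAM
 * entry, so the exe queue is sized to run one command per ramrod.
 */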
2339 mac_obj->set_one_rule = ecore_set_one_mac_e1x;
2340 mac_obj->check_del = ecore_check_mac_del;
2341 mac_obj->check_add = ecore_check_mac_add;
2342 mac_obj->check_move = ecore_check_move_always_err;
2343 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2344
2345 /* Exe Queue */
2346 ecore_exe_queue_init(sc,
2347 &mac_obj->exe_queue, 1, qable_obj,
2348 ecore_validate_vlan_mac,
2349 ecore_remove_vlan_mac,
2350 ecore_optimize_vlan_mac,
2351 ecore_execute_vlan_mac,
2352 ecore_exeq_get_mac);
2353 } else {
2354 mac_obj->set_one_rule = ecore_set_one_mac_e2;
2355 mac_obj->check_del = ecore_check_mac_del;
2356 mac_obj->check_add = ecore_check_mac_add;
2357 mac_obj->check_move = ecore_check_move;
2358 mac_obj->ramrod_cmd =
2359 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2360 mac_obj->get_n_elements = ecore_get_n_elements;
2361
2362 /* Exe Queue */
2363 ecore_exe_queue_init(sc,
2364 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
2365 qable_obj, ecore_validate_vlan_mac,
2366 ecore_remove_vlan_mac,
2367 ecore_optimize_vlan_mac,
2368 ecore_execute_vlan_mac,
2369 ecore_exeq_get_mac);
2370 }
2371 }
2372
2373 void ecore_init_vlan_obj(struct bxe_softc *sc,
2374 struct ecore_vlan_mac_obj *vlan_obj,
2375 uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2376 ecore_dma_addr_t rdata_mapping, int state,
2377 unsigned long *pstate, ecore_obj_type type,
2378 struct ecore_credit_pool_obj *vlans_pool)
2379 {
2380 union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)vlan_obj;
2381
2382 ecore_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
2383 rdata_mapping, state, pstate, type, NULL,
2384 vlans_pool);
2385
2386 vlan_obj->get_credit = ecore_get_credit_vlan;
2387 vlan_obj->put_credit = ecore_put_credit_vlan;
2388 vlan_obj->get_cam_offset = ecore_get_cam_offset_vlan;
2389 vlan_obj->put_cam_offset = ecore_put_cam_offset_vlan;
2390
2391 if (CHIP_IS_E1x(sc)) {
2392 ECORE_ERR("Do not support chips others than E2 and newer\n");
2393 ECORE_BUG();
2394 } else {
2395 vlan_obj->set_one_rule = ecore_set_one_vlan_e2;
2396 vlan_obj->check_del = ecore_check_vlan_del;
2397 vlan_obj->check_add = ecore_check_vlan_add;
2398 vlan_obj->check_move = ecore_check_move;
2399 vlan_obj->ramrod_cmd =
2400 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2401 vlan_obj->get_n_elements = ecore_get_n_elements;
2402
2403 /* Exe Queue */
2404 ecore_exe_queue_init(sc,
2405 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
2406 qable_obj, ecore_validate_vlan_mac,
2407 ecore_remove_vlan_mac,
2408 ecore_optimize_vlan_mac,
2409 ecore_execute_vlan_mac,
2410 ecore_exeq_get_vlan);
2411 }
2412 }
2413
2414 void ecore_init_vlan_mac_obj(struct bxe_softc *sc,
2415 struct ecore_vlan_mac_obj *vlan_mac_obj,
2416 uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2417 ecore_dma_addr_t rdata_mapping, int state,
2418 unsigned long *pstate, ecore_obj_type type,
2419 struct ecore_credit_pool_obj *macs_pool,
2420 struct ecore_credit_pool_obj *vlans_pool)
2421 {
2422 union ecore_qable_obj *qable_obj =
2423 (union ecore_qable_obj *)vlan_mac_obj;
2424
2425 ecore_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
2426 rdata_mapping, state, pstate, type,
2427 macs_pool, vlans_pool);
2428
2429 /* CAM pool handling */
2430 vlan_mac_obj->get_credit = ecore_get_credit_vlan_mac;
2431 vlan_mac_obj->put_credit = ecore_put_credit_vlan_mac;
2432 /* CAM offset is relevant for 57710 and 57711 chips only which have a
2433 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2434 * will be taken from MACs' pool object only.
2435 */
2436 vlan_mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
2437 vlan_mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
2438
2439 if (CHIP_IS_E1(sc)) {
2440 ECORE_ERR("Do not support chips others than E2\n");
2441 ECORE_BUG();
2442 } else if (CHIP_IS_E1H(sc)) {
2443 vlan_mac_obj->set_one_rule = ecore_set_one_vlan_mac_e1h;
2444 vlan_mac_obj->check_del = ecore_check_vlan_mac_del;
2445 vlan_mac_obj->check_add = ecore_check_vlan_mac_add;
2446 vlan_mac_obj->check_move = ecore_check_move_always_err;
2447 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
2448
2449 /* Exe Queue */
2450 ecore_exe_queue_init(sc,
2451 &vlan_mac_obj->exe_queue, 1, qable_obj,
2452 ecore_validate_vlan_mac,
2453 ecore_remove_vlan_mac,
2454 ecore_optimize_vlan_mac,
2455 ecore_execute_vlan_mac,
2456 ecore_exeq_get_vlan_mac);
2457 } else {
2458 vlan_mac_obj->set_one_rule = ecore_set_one_vlan_mac_e2;
2459 vlan_mac_obj->check_del = ecore_check_vlan_mac_del;
2460 vlan_mac_obj->check_add = ecore_check_vlan_mac_add;
2461 vlan_mac_obj->check_move = ecore_check_move;
2462 vlan_mac_obj->ramrod_cmd =
2463 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2464
2465 /* Exe Queue */
2466 ecore_exe_queue_init(sc,
2467 &vlan_mac_obj->exe_queue,
2468 CLASSIFY_RULES_COUNT,
2469 qable_obj, ecore_validate_vlan_mac,
2470 ecore_remove_vlan_mac,
2471 ecore_optimize_vlan_mac,
2472 ecore_execute_vlan_mac,
2473 ecore_exeq_get_vlan_mac);
2474 }
2475 }
2476
2477 void ecore_init_vxlan_fltr_obj(struct bxe_softc *sc,
2478 struct ecore_vlan_mac_obj *vlan_mac_obj,
2479 uint8_t cl_id, uint32_t cid, uint8_t func_id, void *rdata,
2480 ecore_dma_addr_t rdata_mapping, int state,
2481 unsigned long *pstate, ecore_obj_type type,
2482 struct ecore_credit_pool_obj *macs_pool,
2483 struct ecore_credit_pool_obj *vlans_pool)
2484 {
2485 union ecore_qable_obj *qable_obj =
2486 (union ecore_qable_obj *)vlan_mac_obj;
2487
2488 ecore_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id,
2489 rdata, rdata_mapping, state, pstate,
2490 type, macs_pool, vlans_pool);
2491
2492 /* CAM pool handling */
2493 vlan_mac_obj->get_credit = ecore_get_credit_vlan_mac;
2494 vlan_mac_obj->put_credit = ecore_put_credit_vlan_mac;
2495 /* CAM offset is relevant for 57710 and 57711 chips only which have a
2496 * single CAM for both MACs and VLAN-MAC pairs. So the offset
2497 * will be taken from MACs' pool object only.
2498 */
2499 vlan_mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
2500 vlan_mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
2501
2502 if (CHIP_IS_E1x(sc)) {
2503 ECORE_ERR("Do not support chips others than E2/E3\n");
2504 ECORE_BUG();
2505 } else {
2506 vlan_mac_obj->set_one_rule = ecore_set_one_vxlan_fltr_e2;
2507 vlan_mac_obj->check_del = ecore_check_vxlan_fltr_del;
2508 vlan_mac_obj->check_add = ecore_check_vxlan_fltr_add;
2509 vlan_mac_obj->check_move = ecore_check_move;
2510 vlan_mac_obj->ramrod_cmd =
2511 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2512
2513 /* Exe Queue */
2514 ecore_exe_queue_init(sc,
2515 &vlan_mac_obj->exe_queue,
2516 CLASSIFY_RULES_COUNT,
2517 qable_obj, ecore_validate_vlan_mac,
2518 ecore_remove_vlan_mac,
2519 ecore_optimize_vlan_mac,
2520 ecore_execute_vlan_mac,
2521 ecore_exeq_get_vxlan_fltr);
2522 }
2523 }
2524
2525 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2526 static inline void __storm_memset_mac_filters(struct bxe_softc *sc,
2527 struct tstorm_eth_mac_filter_config *mac_filters,
2528 uint16_t pf_id)
2529 {
2530 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2531
2532 uint32_t addr = BAR_TSTRORM_INTMEM +
2533 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2534
2535 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)mac_filters);
2536 }
2537
2538 static int ecore_set_rx_mode_e1x(struct bxe_softc *sc,
2539 struct ecore_rx_mode_ramrod_params *p)
2540 {
2541 /* update the sc MAC filter structure */
2542 uint32_t mask = (1 << p->cl_id);
2543
2544 struct tstorm_eth_mac_filter_config *mac_filters =
2545 (struct tstorm_eth_mac_filter_config *)p->rdata;
2546
2547 /* initial setting is drop-all */
2548 uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
2549 uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2550 uint8_t unmatched_unicast = 0;
2551
2552 /* On E1x we only take the RX accept flags into account since TX switching
2553 * isn't enabled. */
2554 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
2555 /* accept matched ucast */
2556 drop_all_ucast = 0;
2557
2558 if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
2559 /* accept matched mcast */
2560 drop_all_mcast = 0;
2561
2562 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2563 /* accept all ucast */
2564 drop_all_ucast = 0;
2565 accp_all_ucast = 1;
2566 }
2567 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2568 /* accept all mcast */
2569 drop_all_mcast = 0;
2570 accp_all_mcast = 1;
2571 }
2572 if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
2573 /* accept (all) bcast */
2574 accp_all_bcast = 1;
2575 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2576 /* accept unmatched unicasts */
2577 unmatched_unicast = 1;
2578
2579 mac_filters->ucast_drop_all = drop_all_ucast ?
2580 mac_filters->ucast_drop_all | mask :
2581 mac_filters->ucast_drop_all & ~mask;
2582
2583 mac_filters->mcast_drop_all = drop_all_mcast ?
2584 mac_filters->mcast_drop_all | mask :
2585 mac_filters->mcast_drop_all & ~mask;
2586
2587 mac_filters->ucast_accept_all = accp_all_ucast ?
2588 mac_filters->ucast_accept_all | mask :
2589 mac_filters->ucast_accept_all & ~mask;
2590
2591 mac_filters->mcast_accept_all = accp_all_mcast ?
2592 mac_filters->mcast_accept_all | mask :
2593 mac_filters->mcast_accept_all & ~mask;
2594
2595 mac_filters->bcast_accept_all = accp_all_bcast ?
2596 mac_filters->bcast_accept_all | mask :
2597 mac_filters->bcast_accept_all & ~mask;
2598
2599 mac_filters->unmatched_unicast = unmatched_unicast ?
2600 mac_filters->unmatched_unicast | mask :
2601 mac_filters->unmatched_unicast & ~mask;
2602
2603 ECORE_MSG(sc, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
2604 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2605 mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2606 mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2607 mac_filters->bcast_accept_all);
2608
2609 /* write the MAC filter structure*/
2610 __storm_memset_mac_filters(sc, mac_filters, p->func_id);
2611
2612 /* The operation is completed */
2613 ECORE_CLEAR_BIT(p->state, p->pstate);
2614 ECORE_SMP_MB_AFTER_CLEAR_BIT();
2615
2616 return ECORE_SUCCESS;
2617 }
2618
2619 /* Setup ramrod data */
2620 static inline void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid,
2621 struct eth_classify_header *hdr,
2622 uint8_t rule_cnt)
2623 {
2624 hdr->echo = ECORE_CPU_TO_LE32(cid);
2625 hdr->rule_cnt = rule_cnt;
2626 }
2627
2628 static inline void ecore_rx_mode_set_cmd_state_e2(struct bxe_softc *sc,
2629 unsigned long *accept_flags,
2630 struct eth_filter_rules_cmd *cmd,
2631 bool clear_accept_all)
2632 {
2633 uint16_t state;
2634
2635 /* start with 'drop-all' */
2636 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2637 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2638
2639 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
2640 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2641
2642 if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
2643 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2644
2645 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
2646 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2647 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2648 }
2649
2650 if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
2651 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2652 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2653 }
2654 if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
2655 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2656
2657 if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
2658 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2659 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2660 }
2661 if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
2662 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2663
2664 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2665 if (clear_accept_all) {
2666 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2667 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2668 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2669 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2670 }
2671
2672 cmd->state = ECORE_CPU_TO_LE16(state);
2673 }
2674
2675 static int ecore_set_rx_mode_e2(struct bxe_softc *sc,
2676 struct ecore_rx_mode_ramrod_params *p)
2677 {
2678 struct eth_filter_rules_ramrod_data *data = p->rdata;
2679 int rc;
2680 uint8_t rule_idx = 0;
2681
2682 /* Reset the ramrod data buffer */
2683 ECORE_MEMSET(data, 0, sizeof(*data));
2684
2685 /* Setup ramrod data */
2686
2687 /* Tx (internal switching) */
2688 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2689 data->rules[rule_idx].client_id = p->cl_id;
2690 data->rules[rule_idx].func_id = p->func_id;
2691
2692 data->rules[rule_idx].cmd_general_data =
2693 ETH_FILTER_RULES_CMD_TX_CMD;
2694
2695 ecore_rx_mode_set_cmd_state_e2(sc, &p->tx_accept_flags,
2696 &(data->rules[rule_idx++]),
2697 FALSE);
2698 }
2699
2700 /* Rx */
2701 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2702 data->rules[rule_idx].client_id = p->cl_id;
2703 data->rules[rule_idx].func_id = p->func_id;
2704
2705 data->rules[rule_idx].cmd_general_data =
2706 ETH_FILTER_RULES_CMD_RX_CMD;
2707
2708 ecore_rx_mode_set_cmd_state_e2(sc, &p->rx_accept_flags,
2709 &(data->rules[rule_idx++]),
2710 FALSE);
2711 }
2712
2713 /* If FCoE Queue configuration has been requested configure the Rx and
2714 * internal switching modes for this queue in separate rules.
2715 *
2716 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2717 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2718 */
2719 if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2720 /* Tx (internal switching) */
2721 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2722 data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2723 data->rules[rule_idx].func_id = p->func_id;
2724
2725 data->rules[rule_idx].cmd_general_data =
2726 ETH_FILTER_RULES_CMD_TX_CMD;
2727
2728 ecore_rx_mode_set_cmd_state_e2(sc, &p->tx_accept_flags,
2729 &(data->rules[rule_idx]),
2730 TRUE);
2731 rule_idx++;
2732 }
2733
2734 /* Rx */
2735 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2736 data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2737 data->rules[rule_idx].func_id = p->func_id;
2738
2739 data->rules[rule_idx].cmd_general_data =
2740 ETH_FILTER_RULES_CMD_RX_CMD;
2741
2742 ecore_rx_mode_set_cmd_state_e2(sc, &p->rx_accept_flags,
2743 &(data->rules[rule_idx]),
2744 TRUE);
2745 rule_idx++;
2746 }
2747 }
2748
2749 /* Set the ramrod header (most importantly - number of rules to
2750 * configure).
2751 */
2752 ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2753
2754 ECORE_MSG(sc, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
2755 data->header.rule_cnt, p->rx_accept_flags,
2756 p->tx_accept_flags);
2757
2758 /* No need for an explicit memory barrier here as long as we
2759 * ensure the ordering of writing to the SPQ element
2760 * and updating of the SPQ producer which involves a memory
2761 * read. If the memory read is removed we will have to put a
2762 * full memory barrier there (inside ecore_sp_post()).
2763 */
2764
2765 /* Send a ramrod */
2766 rc = ecore_sp_post(sc,
2767 RAMROD_CMD_ID_ETH_FILTER_RULES,
2768 p->cid,
2769 p->rdata_mapping,
2770 ETH_CONNECTION_TYPE);
2771 if (rc)
2772 return rc;
2773
2774 /* Ramrod completion is pending */
2775 return ECORE_PENDING;
2776 }
2777
2778 static int ecore_wait_rx_mode_comp_e2(struct bxe_softc *sc,
2779 struct ecore_rx_mode_ramrod_params *p)
2780 {
2781 return ecore_state_wait(sc, p->state, p->pstate);
2782 }
2783
2784 static int ecore_empty_rx_mode_wait(struct bxe_softc *sc,
2785 struct ecore_rx_mode_ramrod_params *p)
2786 {
2787 /* Do nothing */
2788 return ECORE_SUCCESS;
2789 }
2790
2791 int ecore_config_rx_mode(struct bxe_softc *sc,
2792 struct ecore_rx_mode_ramrod_params *p)
2793 {
2794 int rc;
2795
2796 /* Configure the new classification in the chip */
2797 rc = p->rx_mode_obj->config_rx_mode(sc, p);
2798 if (rc < 0)
2799 return rc;
2800
2801 /* Wait for a ramrod completion if was requested */
2802 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2803 rc = p->rx_mode_obj->wait_comp(sc, p);
2804 if (rc)
2805 return rc;
2806 }
2807
2808 return rc;
2809 }
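
/* Usage note for ecore_config_rx_mode() above: an illustrative, hypothetical
 * caller programming an RX-only "unicast + all-multicast + broadcast" filter
 * and waiting for the ramrod could look roughly like this (object, ids and
 * rdata setup omitted):
 *
 *	struct ecore_rx_mode_ramrod_params p;
 *
 *	ECORE_MEMSET(&p, 0, sizeof(p));
 *	p.rx_mode_obj = rx_mode_obj;
 *	ECORE_SET_BIT_NA(ECORE_ACCEPT_UNICAST, &p.rx_accept_flags);
 *	ECORE_SET_BIT_NA(ECORE_ACCEPT_ALL_MULTICAST, &p.rx_accept_flags);
 *	ECORE_SET_BIT_NA(ECORE_ACCEPT_BROADCAST, &p.rx_accept_flags);
 *	ECORE_SET_BIT_NA(RAMROD_RX, &p.ramrod_flags);
 *	ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = ecore_config_rx_mode(sc, &p);
 */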
2810
2811 void ecore_init_rx_mode_obj(struct bxe_softc *sc,
2812 struct ecore_rx_mode_obj *o)
2813 {
2814 if (CHIP_IS_E1x(sc)) {
2815 o->wait_comp = ecore_empty_rx_mode_wait;
2816 o->config_rx_mode = ecore_set_rx_mode_e1x;
2817 } else {
2818 o->wait_comp = ecore_wait_rx_mode_comp_e2;
2819 o->config_rx_mode = ecore_set_rx_mode_e2;
2820 }
2821 }
2822
2823 /********************* Multicast verbs: SET, CLEAR ****************************/
2824 static inline uint8_t ecore_mcast_bin_from_mac(uint8_t *mac)
2825 {
2826 return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
2827 }
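
/* The multicast "approximate match" registry is a 256-bin bit vector: each
 * MAC hashes (via the top CRC32 byte above) into one of 256 bins, and the FW
 * accepts any multicast frame whose hash lands in a bin that is set.
 */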
2828
2829 struct ecore_mcast_mac_elem {
2830 ecore_list_entry_t link;
2831 uint8_t mac[ETH_ALEN];
2832 uint8_t pad[2]; /* For a natural alignment of the following buffer */
2833 };
2834
2835 struct ecore_pending_mcast_cmd {
2836 ecore_list_entry_t link;
2837 int type; /* ECORE_MCAST_CMD_X */
2838 union {
2839 ecore_list_t macs_head;
2840 uint32_t macs_num; /* Needed for DEL command */
2841 int next_bin; /* Needed for RESTORE flow with aprox match */
2842 } data;
2843
2844 bool done; /* set to TRUE, when the command has been handled,
2845 * practically used in 57712 handling only, where one pending
2846 * command may be handled in a few operations. As long as for
2847 * other chips every operation handling is completed in a
2848 * single ramrod, there is no need to utilize this field.
2849 */
2850 };
2851
2852 static int ecore_mcast_wait(struct bxe_softc *sc,
2853 struct ecore_mcast_obj *o)
2854 {
2855 if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) ||
2856 o->raw.wait_comp(sc, &o->raw))
2857 return ECORE_TIMEOUT;
2858
2859 return ECORE_SUCCESS;
2860 }
2861
2862 static int ecore_mcast_enqueue_cmd(struct bxe_softc *sc,
2863 struct ecore_mcast_obj *o,
2864 struct ecore_mcast_ramrod_params *p,
2865 enum ecore_mcast_cmd cmd)
2866 {
2867 int total_sz;
2868 struct ecore_pending_mcast_cmd *new_cmd;
2869 struct ecore_mcast_mac_elem *cur_mac = NULL;
2870 struct ecore_mcast_list_elem *pos;
2871 int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
2872 p->mcast_list_len : 0);
2873
2874 /* If the command is empty ("handle pending commands only"), break */
2875 if (!p->mcast_list_len)
2876 return ECORE_SUCCESS;
2877
2878 total_sz = sizeof(*new_cmd) +
2879 macs_list_len * sizeof(struct ecore_mcast_mac_elem);
2880
2881 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2882 new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc);
2883
2884 if (!new_cmd)
2885 return ECORE_NOMEM;
2886
2887 ECORE_MSG(sc, "About to enqueue a new %d command. macs_list_len=%d\n",
2888 cmd, macs_list_len);
2889
2890 ECORE_LIST_INIT(&new_cmd->data.macs_head);
2891
2892 new_cmd->type = cmd;
2893 new_cmd->done = FALSE;
2894
2895 switch (cmd) {
2896 case ECORE_MCAST_CMD_ADD:
2897 cur_mac = (struct ecore_mcast_mac_elem *)
2898 ((uint8_t *)new_cmd + sizeof(*new_cmd));
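/* The MAC elements live in the same allocation, right after the command
 * header (see total_sz above), so no per-MAC allocation is needed.
 */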
2899
2900 /* Push the MACs of the current command into the pending command
2901 * MACs list: FIFO
2902 */
2903 ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
2904 struct ecore_mcast_list_elem) {
2905 ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN);
2906 ECORE_LIST_PUSH_TAIL(&cur_mac->link,
2907 &new_cmd->data.macs_head);
2908 cur_mac++;
2909 }
2910
2911 break;
2912
2913 case ECORE_MCAST_CMD_DEL:
2914 new_cmd->data.macs_num = p->mcast_list_len;
2915 break;
2916
2917 case ECORE_MCAST_CMD_RESTORE:
2918 new_cmd->data.next_bin = 0;
2919 break;
2920
2921 default:
2922 ECORE_FREE(sc, new_cmd, total_sz);
2923 ECORE_ERR("Unknown command: %d\n", cmd);
2924 return ECORE_INVAL;
2925 }
2926
2927 /* Push the new pending command to the tail of the pending list: FIFO */
2928 ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);
2929
2930 o->set_sched(o);
2931
2932 return ECORE_PENDING;
2933 }
2934
2935 /**
2936 * ecore_mcast_get_next_bin - get the next set bin (index)
2937 *
2938 * @o:
2939 * @last: index to start looking from (including)
2940 *
2941 * returns the next found (set) bin or a negative value if none is found.
2942 */
2943 static inline int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
2944 {
2945 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2946
2947 for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
2948 if (o->registry.aprox_match.vec[i])
2949 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2950 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2951 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2952 vec, cur_bit)) {
2953 return cur_bit;
2954 }
2955 }
2956 inner_start = 0;
2957 }
2958
2959 /* None found */
2960 return -1;
2961 }
2962
2963 /**
2964 * ecore_mcast_clear_first_bin - find the first set bin and clear it
2965 *
2966 * @o:
2967 *
2968 * returns the index of the found bin or -1 if none is found
2969 */
2970 static inline int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
2971 {
2972 int cur_bit = ecore_mcast_get_next_bin(o, 0);
2973
2974 if (cur_bit >= 0)
2975 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2976
2977 return cur_bit;
2978 }
2979
2980 static inline uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
2981 {
2982 struct ecore_raw_obj *raw = &o->raw;
2983 uint8_t rx_tx_flag = 0;
2984
2985 if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
2986 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2987 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2988
2989 if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
2990 (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2991 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2992
2993 return rx_tx_flag;
2994 }
2995
2996 static void ecore_mcast_set_one_rule_e2(struct bxe_softc *sc,
2997 struct ecore_mcast_obj *o, int idx,
2998 union ecore_mcast_config_data *cfg_data,
2999 enum ecore_mcast_cmd cmd)
3000 {
3001 struct ecore_raw_obj *r = &o->raw;
3002 struct eth_multicast_rules_ramrod_data *data =
3003 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
3004 uint8_t func_id = r->func_id;
3005 uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
3006 int bin;
3007
3008 if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
3009 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
3010
3011 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
3012
3013 /* Get a bin and update a bins' vector */
3014 switch (cmd) {
3015 case ECORE_MCAST_CMD_ADD:
3016 bin = ecore_mcast_bin_from_mac(cfg_data->mac);
3017 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
3018 break;
3019
3020 case ECORE_MCAST_CMD_DEL:
3021 /* If there were no more bins to clear
3022 * (ecore_mcast_clear_first_bin() returns -1) then we would
3023 * clear any (0xff) bin.
3024 * See ecore_mcast_validate_e2() for explanation when it may
3025 * happen.
3026 */
3027 bin = ecore_mcast_clear_first_bin(o);
3028 break;
3029
3030 case ECORE_MCAST_CMD_RESTORE:
3031 bin = cfg_data->bin;
3032 break;
3033
3034 default:
3035 ECORE_ERR("Unknown command: %d\n", cmd);
3036 return;
3037 }
3038
3039 ECORE_MSG(sc, "%s bin %d\n",
3040 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
3041 "Setting" : "Clearing"), bin);
3042
3043 data->rules[idx].bin_id = (uint8_t)bin;
3044 data->rules[idx].func_id = func_id;
3045 data->rules[idx].engine_id = o->engine_id;
3046 }
3047
3048 /**
3049 * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
3050 *
3051 * @sc: device handle
3052 * @o:
3053 * @start_bin: index in the registry to start from (including)
3054 * @rdata_idx: index in the ramrod data to start from
3055 *
3056 * returns last handled bin index or -1 if all bins have been handled
3057 */
3058 static inline int ecore_mcast_handle_restore_cmd_e2(
3059 struct bxe_softc *sc, struct ecore_mcast_obj *o , int start_bin,
3060 int *rdata_idx)
3061 {
3062 int cur_bin, cnt = *rdata_idx;
3063 union ecore_mcast_config_data cfg_data = {NULL};
3064
3065 /* go through the registry and configure the bins from it */
3066 for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
3067 cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
3068
3069 cfg_data.bin = (uint8_t)cur_bin;
3070 o->set_one_rule(sc, o, cnt, &cfg_data,
3071 ECORE_MCAST_CMD_RESTORE);
3072
3073 cnt++;
3074
3075 ECORE_MSG(sc, "About to configure a bin %d\n", cur_bin);
3076
3077 /* Break if we reached the maximum number
3078 * of rules.
3079 */
3080 if (cnt >= o->max_cmd_len)
3081 break;
3082 }
3083
3084 *rdata_idx = cnt;
3085
3086 return cur_bin;
3087 }
3088
3089 static inline void ecore_mcast_hdl_pending_add_e2(struct bxe_softc *sc,
3090 struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
3091 int *line_idx)
3092 {
3093 struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
3094 int cnt = *line_idx;
3095 union ecore_mcast_config_data cfg_data = {NULL};
3096
3097 ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
3098 &cmd_pos->data.macs_head, link, struct ecore_mcast_mac_elem) {
3099
3100 cfg_data.mac = &pmac_pos->mac[0];
3101 o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
3102
3103 cnt++;
3104
3105 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3106 pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
3107
3108 ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
3109 &cmd_pos->data.macs_head);
3110
3111 /* Break if we reached the maximum number
3112 * of rules.
3113 */
3114 if (cnt >= o->max_cmd_len)
3115 break;
3116 }
3117
3118 *line_idx = cnt;
3119
3120 /* if no more MACs to configure - we are done */
3121 if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
3122 cmd_pos->done = TRUE;
3123 }
3124
3125 static inline void ecore_mcast_hdl_pending_del_e2(struct bxe_softc *sc,
3126 struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
3127 int *line_idx)
3128 {
3129 int cnt = *line_idx;
3130
3131 while (cmd_pos->data.macs_num) {
3132 o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type);
3133
3134 cnt++;
3135
3136 cmd_pos->data.macs_num--;
3137
3138 ECORE_MSG(sc, "Deleting MAC. %d left,cnt is %d\n",
3139 cmd_pos->data.macs_num, cnt);
3140
3141 /* Break if we reached the maximum
3142 * number of rules.
3143 */
3144 if (cnt >= o->max_cmd_len)
3145 break;
3146 }
3147
3148 *line_idx = cnt;
3149
3150 /* If we cleared all bins - we are done */
3151 if (!cmd_pos->data.macs_num)
3152 cmd_pos->done = TRUE;
3153 }
3154
3155 static inline void ecore_mcast_hdl_pending_restore_e2(struct bxe_softc *sc,
3156 struct ecore_mcast_obj *o, struct ecore_pending_mcast_cmd *cmd_pos,
3157 int *line_idx)
3158 {
3159 cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin,
3160 line_idx);
3161
3162 if (cmd_pos->data.next_bin < 0)
3163 /* If o->set_restore returned -1 we are done */
3164 cmd_pos->done = TRUE;
3165 else
3166 /* Start from the next bin next time */
3167 cmd_pos->data.next_bin++;
3168 }
3169
3170 static inline int ecore_mcast_handle_pending_cmds_e2(struct bxe_softc *sc,
3171 struct ecore_mcast_ramrod_params *p)
3172 {
3173 struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
3174 int cnt = 0;
3175 struct ecore_mcast_obj *o = p->mcast_obj;
3176
3177 ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
3178 &o->pending_cmds_head, link, struct ecore_pending_mcast_cmd) {
3179 switch (cmd_pos->type) {
3180 case ECORE_MCAST_CMD_ADD:
3181 ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt);
3182 break;
3183
3184 case ECORE_MCAST_CMD_DEL:
3185 ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt);
3186 break;
3187
3188 case ECORE_MCAST_CMD_RESTORE:
3189 ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos,
3190 &cnt);
3191 break;
3192
3193 default:
3194 ECORE_ERR("Unknown command: %d\n", cmd_pos->type);
3195 return ECORE_INVAL;
3196 }
3197
3198 /* If the command has been completed - remove it from the list
3199 * and free the memory
3200 */
3201 if (cmd_pos->done) {
3202 ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
3203 &o->pending_cmds_head);
3204 ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
3205 }
3206
3207 /* Break if we reached the maximum number of rules */
3208 if (cnt >= o->max_cmd_len)
3209 break;
3210 }
3211
3212 return cnt;
3213 }
3214
3215 static inline void ecore_mcast_hdl_add(struct bxe_softc *sc,
3216 struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3217 int *line_idx)
3218 {
3219 struct ecore_mcast_list_elem *mlist_pos;
3220 union ecore_mcast_config_data cfg_data = {NULL};
3221 int cnt = *line_idx;
3222
3223 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
3224 struct ecore_mcast_list_elem) {
3225 cfg_data.mac = mlist_pos->mac;
3226 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);
3227
3228 cnt++;
3229
3230 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3231 mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
3232 }
3233
3234 *line_idx = cnt;
3235 }
3236
3237 static inline void ecore_mcast_hdl_del(struct bxe_softc *sc,
3238 struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3239 int *line_idx)
3240 {
3241 int cnt = *line_idx, i;
3242
3243 for (i = 0; i < p->mcast_list_len; i++) {
3244 o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL);
3245
3246 cnt++;
3247
3248 ECORE_MSG(sc, "Deleting MAC. %d left\n",
3249 p->mcast_list_len - i - 1);
3250 }
3251
3252 *line_idx = cnt;
3253 }
3254
3255 /**
3256 * ecore_mcast_handle_current_cmd - handle the current (non-pending) command
3257 *
3258 * @sc: device handle
3259 * @p: multicast ramrod parameters
3260 * @cmd: command to handle
3261 * @start_cnt: first line in the ramrod data that may be used
3262 *
3263 * This function is called iff there is enough room for the current command in
3264 * the ramrod data.
3265 * Returns the total number of lines filled in the ramrod data.
3266 */
3267 static inline int ecore_mcast_handle_current_cmd(struct bxe_softc *sc,
3268 struct ecore_mcast_ramrod_params *p,
3269 enum ecore_mcast_cmd cmd,
3270 int start_cnt)
3271 {
3272 struct ecore_mcast_obj *o = p->mcast_obj;
3273 int cnt = start_cnt;
3274
3275 ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len);
3276
3277 switch (cmd) {
3278 case ECORE_MCAST_CMD_ADD:
3279 ecore_mcast_hdl_add(sc, o, p, &cnt);
3280 break;
3281
3282 case ECORE_MCAST_CMD_DEL:
3283 ecore_mcast_hdl_del(sc, o, p, &cnt);
3284 break;
3285
3286 case ECORE_MCAST_CMD_RESTORE:
3287 o->hdl_restore(sc, o, 0, &cnt);
3288 break;
3289
3290 default:
3291 ECORE_ERR("Unknown command: %d\n", cmd);
3292 return ECORE_INVAL;
3293 }
3294
3295 /* The current command has been handled */
3296 p->mcast_list_len = 0;
3297
3298 return cnt;
3299 }
3300
3301 static int ecore_mcast_validate_e2(struct bxe_softc *sc,
3302 struct ecore_mcast_ramrod_params *p,
3303 enum ecore_mcast_cmd cmd)
3304 {
3305 struct ecore_mcast_obj *o = p->mcast_obj;
3306 int reg_sz = o->get_registry_size(o);
3307
3308 switch (cmd) {
3309 /* DEL command deletes all currently configured MACs */
3310 case ECORE_MCAST_CMD_DEL:
3311 o->set_registry_size(o, 0);
3312 /* Don't break */
3313
3314 /* RESTORE command will restore the entire multicast configuration */
3315 case ECORE_MCAST_CMD_RESTORE:
3316 /* Here we set the approximate amount of work to do, which may
3317 * in fact turn out to be less: some MACs in postponed ADD
3318 * command(s) scheduled before this command may fall into
3319 * the same bin, so the actual number of bins set in the
3320 * registry would be less than we estimate here. See
3321 * ecore_mcast_set_one_rule_e2() for further details.
3322 */
3323 p->mcast_list_len = reg_sz;
3324 break;
3325
3326 case ECORE_MCAST_CMD_ADD:
3327 case ECORE_MCAST_CMD_CONT:
3328 /* Here we assume that all new MACs will fall into new bins.
3329 * However we will correct the real registry size after we
3330 * handle all pending commands.
3331 */
3332 o->set_registry_size(o, reg_sz + p->mcast_list_len);
3333 break;
3334
3335 default:
3336 ECORE_ERR("Unknown command: %d\n", cmd);
3337 return ECORE_INVAL;
3338 }
3339
3340 /* Increase the total number of MACs pending to be configured */
3341 o->total_pending_num += p->mcast_list_len;
3342
3343 return ECORE_SUCCESS;
3344 }
3345
3346 static void ecore_mcast_revert_e2(struct bxe_softc *sc,
3347 struct ecore_mcast_ramrod_params *p,
3348 int old_num_bins)
3349 {
3350 struct ecore_mcast_obj *o = p->mcast_obj;
3351
3352 o->set_registry_size(o, old_num_bins);
3353 o->total_pending_num -= p->mcast_list_len;
3354 }
3355
3356 /**
3357 * ecore_mcast_set_rdata_hdr_e2 - set the header values
3358 *
3359 * @sc: device handle
3360 * @p: multicast ramrod parameters
3361 * @len: number of rules to handle
3362 */
3363 static inline void ecore_mcast_set_rdata_hdr_e2(struct bxe_softc *sc,
3364 struct ecore_mcast_ramrod_params *p,
3365 uint8_t len)
3366 {
3367 struct ecore_raw_obj *r = &p->mcast_obj->raw;
3368 struct eth_multicast_rules_ramrod_data *data =
3369 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
3370
3371 data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3372 (ECORE_FILTER_MCAST_PENDING <<
3373 ECORE_SWCID_SHIFT));
3374 data->header.rule_cnt = len;
3375 }
3376
3377 /**
3378 * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
3379 *
3380 * @sc: device handle
3381 * @o:
3382 *
3383 * Recalculate the actual number of set bins in the registry using Brian
3384 * Kernighan's algorithm: its execution cost is proportional to the number of set bins.
3385 *
3386 * Returns 0 for compliance with ecore_mcast_refresh_registry_e1().
3387 */
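/* Illustrative example of the Brian Kernighan count used below: for a
 * registry vector element elem = 0xB4 (binary 10110100), elem &= elem - 1
 * runs four times - 10110000, 10100000, 10000000, 0 - so cnt grows by
 * exactly the number of set bins without scanning every bit position.
 */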
3388 static inline int ecore_mcast_refresh_registry_e2(struct bxe_softc *sc,
3389 struct ecore_mcast_obj *o)
3390 {
3391 int i, cnt = 0;
3392 uint64_t elem;
3393
3394 for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
3395 elem = o->registry.aprox_match.vec[i];
3396 for (; elem; cnt++)
3397 elem &= elem - 1;
3398 }
3399
3400 o->set_registry_size(o, cnt);
3401
3402 return ECORE_SUCCESS;
3403 }
3404
3405 static int ecore_mcast_setup_e2(struct bxe_softc *sc,
3406 struct ecore_mcast_ramrod_params *p,
3407 enum ecore_mcast_cmd cmd)
3408 {
3409 struct ecore_raw_obj *raw = &p->mcast_obj->raw;
3410 struct ecore_mcast_obj *o = p->mcast_obj;
3411 struct eth_multicast_rules_ramrod_data *data =
3412 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
3413 int cnt = 0, rc;
3414
3415 /* Reset the ramrod data buffer */
3416 ECORE_MEMSET(data, 0, sizeof(*data));
3417
3418 cnt = ecore_mcast_handle_pending_cmds_e2(sc, p);
3419
3420 /* If there are no more pending commands - clear SCHEDULED state */
3421 if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
3422 o->clear_sched(o);
3423
3424 /* The below may be TRUE iff there was enough room in ramrod
3425 * data for all pending commands and for the current
3426 * command. Otherwise the current command would have been added
3427 * to the pending commands and p->mcast_list_len would have been
3428 * zeroed.
3429 */
3430 if (p->mcast_list_len > 0)
3431 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt);
3432
3433 /* We've pulled out some MACs - update the total number of
3434 * outstanding.
3435 */
3436 o->total_pending_num -= cnt;
3437
3438 /* send a ramrod */
3439 ECORE_DBG_BREAK_IF(o->total_pending_num < 0);
3440 ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
3441
3442 ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t)cnt);
3443
3444 /* Update a registry size if there are no more pending operations.
3445 *
3446 * We don't want to change the value of the registry size if there are
3447 * pending operations because we want it to always be equal to the
3448 * exact or the approximate number (see ecore_mcast_validate_e2()) of
3449 * set bins after the last requested operation in order to properly
3450 * evaluate the size of the next DEL/RESTORE operation.
3451 *
3452 * Note that we update the registry itself during command(s) handling
3453 * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
3454 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
3455 * with a limited amount of update commands (per MAC/bin) and we don't
3456 * know in this scope what the actual state of bins configuration is
3457 * going to be after this ramrod.
3458 */
3459 if (!o->total_pending_num)
3460 ecore_mcast_refresh_registry_e2(sc, o);
3461
3462 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
3463 * RAMROD_PENDING status immediately.
3464 */
3465 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3466 raw->clear_pending(raw);
3467 return ECORE_SUCCESS;
3468 } else {
3469 /* No need for an explicit memory barrier here as long as we
3470 * ensure the ordering of writing to the SPQ element
3471 * and updating of the SPQ producer which involves a memory
3472 * read. If the memory read is removed we will have to put a
3473 * full memory barrier there (inside ecore_sp_post()).
3474 */
3475
3476 /* Send a ramrod */
3477 rc = ecore_sp_post( sc,
3478 RAMROD_CMD_ID_ETH_MULTICAST_RULES,
3479 raw->cid,
3480 raw->rdata_mapping,
3481 ETH_CONNECTION_TYPE);
3482 if (rc)
3483 return rc;
3484
3485 /* Ramrod completion is pending */
3486 return ECORE_PENDING;
3487 }
3488 }
3489
3490 static int ecore_mcast_validate_e1h(struct bxe_softc *sc,
3491 struct ecore_mcast_ramrod_params *p,
3492 enum ecore_mcast_cmd cmd)
3493 {
3494 /* Mark, that there is a work to do */
3495 if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
3496 p->mcast_list_len = 1;
3497
3498 return ECORE_SUCCESS;
3499 }
3500
3501 static void ecore_mcast_revert_e1h(struct bxe_softc *sc,
3502 struct ecore_mcast_ramrod_params *p,
3503 int old_num_bins)
3504 {
3505 /* Do nothing */
3506 }
3507
3508 #define ECORE_57711_SET_MC_FILTER(filter, bit) \
3509 do { \
3510 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
3511 } while (0)
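/* Illustrative example of the macro above: for bin 37 it expands to
 * filter[37 >> 5] |= (1 << (37 & 0x1f)), i.e. filter[1] |= (1 << 5) -
 * word 1, bit 5 of the 32-bit-wide approximate match table.
 */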
3512
3513 static inline void ecore_mcast_hdl_add_e1h(struct bxe_softc *sc,
3514 struct ecore_mcast_obj *o,
3515 struct ecore_mcast_ramrod_params *p,
3516 uint32_t *mc_filter)
3517 {
3518 struct ecore_mcast_list_elem *mlist_pos;
3519 int bit;
3520
3521 ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
3522 struct ecore_mcast_list_elem) {
3523 bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
3524 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
3525
3526 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d\n",
3527 mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2], mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5], bit);
3528
3529 /* bookkeeping... */
3530 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3531 bit);
3532 }
3533 }
3534
3535 static inline void ecore_mcast_hdl_restore_e1h(struct bxe_softc *sc,
3536 struct ecore_mcast_obj *o, struct ecore_mcast_ramrod_params *p,
3537 uint32_t *mc_filter)
3538 {
3539 int bit;
3540
3541 for (bit = ecore_mcast_get_next_bin(o, 0);
3542 bit >= 0;
3543 bit = ecore_mcast_get_next_bin(o, bit + 1)) {
3544 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
3545 ECORE_MSG(sc, "About to set bin %d\n", bit);
3546 }
3547 }
3548
3549 /* On 57711 we write the multicast MACs' approximate match
3550 * table directly into the TSTORM's internal RAM, so we don't
3551 * really need any tricks to make it work.
3552 */
3553 static int ecore_mcast_setup_e1h(struct bxe_softc *sc,
3554 struct ecore_mcast_ramrod_params *p,
3555 enum ecore_mcast_cmd cmd)
3556 {
3557 int i;
3558 struct ecore_mcast_obj *o = p->mcast_obj;
3559 struct ecore_raw_obj *r = &o->raw;
3560
3561 /* Unless CLEAR_ONLY has been requested - build the multicast filter and
3562 * write it into the internal memory; otherwise only clear the registry.
3563 */
3564 if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3565 uint32_t mc_filter[ECORE_MC_HASH_SIZE] = {0};
3566
3567 /* Set the multicast filter bits before writing it into
3568 * the internal memory.
3569 */
3570 switch (cmd) {
3571 case ECORE_MCAST_CMD_ADD:
3572 ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter);
3573 break;
3574
3575 case ECORE_MCAST_CMD_DEL:
3576 ECORE_MSG(sc,
3577 "Invalidating multicast MACs configuration\n");
3578
3579 /* clear the registry */
3580 ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3581 sizeof(o->registry.aprox_match.vec));
3582 break;
3583
3584 case ECORE_MCAST_CMD_RESTORE:
3585 ecore_mcast_hdl_restore_e1h(sc, o, p, mc_filter);
3586 break;
3587
3588 default:
3589 ECORE_ERR("Unknown command: %d\n", cmd);
3590 return ECORE_INVAL;
3591 }
3592
3593 /* Set the mcast filter in the internal memory */
3594 for (i = 0; i < ECORE_MC_HASH_SIZE; i++)
3595 REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);
3596 } else
3597 /* clear the registry */
3598 ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3599 sizeof(o->registry.aprox_match.vec));
3600
3601 /* We are done */
3602 r->clear_pending(r);
3603
3604 return ECORE_SUCCESS;
3605 }
3606
3607 static int ecore_mcast_validate_e1(struct bxe_softc *sc,
3608 struct ecore_mcast_ramrod_params *p,
3609 enum ecore_mcast_cmd cmd)
3610 {
3611 struct ecore_mcast_obj *o = p->mcast_obj;
3612 int reg_sz = o->get_registry_size(o);
3613
3614 switch (cmd) {
3615 /* DEL command deletes all currently configured MACs */
3616 case ECORE_MCAST_CMD_DEL:
3617 o->set_registry_size(o, 0);
3618 /* Don't break */
3619
3620 /* RESTORE command will restore the entire multicast configuration */
3621 case ECORE_MCAST_CMD_RESTORE:
3622 p->mcast_list_len = reg_sz;
3623 ECORE_MSG(sc, "Command %d, p->mcast_list_len=%d\n",
3624 cmd, p->mcast_list_len);
3625 break;
3626
3627 case ECORE_MCAST_CMD_ADD:
3628 case ECORE_MCAST_CMD_CONT:
3629 /* Multicast MACs on 57710 are configured as unicast MACs and
3630 * there is only a limited number of CAM entries for that
3631 * matter.
3632 */
3633 if (p->mcast_list_len > o->max_cmd_len) {
3634 ECORE_ERR("Can't configure more than %d multicast MACs on 57710\n",
3635 o->max_cmd_len);
3636 return ECORE_INVAL;
3637 }
3638 /* Every configured MAC should be cleared if DEL command is
3639 * called. Only the last ADD command is relevant since
3640 * every ADD command overrides the previous configuration.
3641 */
3642 ECORE_MSG(sc, "p->mcast_list_len=%d\n", p->mcast_list_len);
3643 if (p->mcast_list_len > 0)
3644 o->set_registry_size(o, p->mcast_list_len);
3645
3646 break;
3647
3648 default:
3649 ECORE_ERR("Unknown command: %d\n", cmd);
3650 return ECORE_INVAL;
3651 }
3652
3653 /* We want to ensure that commands are executed one by one for 57710.
3654 * Therefore each non-empty command will consume o->max_cmd_len.
3655 */
3656 if (p->mcast_list_len)
3657 o->total_pending_num += o->max_cmd_len;
3658
3659 return ECORE_SUCCESS;
3660 }
3661
3662 static void ecore_mcast_revert_e1(struct bxe_softc *sc,
3663 struct ecore_mcast_ramrod_params *p,
3664 int old_num_macs)
3665 {
3666 struct ecore_mcast_obj *o = p->mcast_obj;
3667
3668 o->set_registry_size(o, old_num_macs);
3669
3670 /* If the current command hasn't been handled yet, getting
3671 * here means that it is meant to be dropped and we have to
3672 * update the number of outstanding MACs accordingly.
3673 */
3674 if (p->mcast_list_len)
3675 o->total_pending_num -= o->max_cmd_len;
3676 }
3677
3678 static void ecore_mcast_set_one_rule_e1(struct bxe_softc *sc,
3679 struct ecore_mcast_obj *o, int idx,
3680 union ecore_mcast_config_data *cfg_data,
3681 enum ecore_mcast_cmd cmd)
3682 {
3683 struct ecore_raw_obj *r = &o->raw;
3684 struct mac_configuration_cmd *data =
3685 (struct mac_configuration_cmd *)(r->rdata);
3686
3687 /* copy mac */
3688 if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE)) {
3689 ecore_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3690 &data->config_table[idx].middle_mac_addr,
3691 &data->config_table[idx].lsb_mac_addr,
3692 cfg_data->mac);
3693
3694 data->config_table[idx].vlan_id = 0;
3695 data->config_table[idx].pf_id = r->func_id;
3696 data->config_table[idx].clients_bit_vector =
3697 ECORE_CPU_TO_LE32(1 << r->cl_id);
3698
3699 ECORE_SET_FLAG(data->config_table[idx].flags,
3700 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3701 T_ETH_MAC_COMMAND_SET);
3702 }
3703 }
3704
3705 /**
3706 * ecore_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3707 *
3708 * @sc: device handle
3709 * @p:
3710 * @len: number of rules to handle
3711 */
3712 static inline void ecore_mcast_set_rdata_hdr_e1(struct bxe_softc *sc,
3713 struct ecore_mcast_ramrod_params *p,
3714 uint8_t len)
3715 {
3716 struct ecore_raw_obj *r = &p->mcast_obj->raw;
3717 struct mac_configuration_cmd *data =
3718 (struct mac_configuration_cmd *)(r->rdata);
3719
3720 uint8_t offset = (CHIP_REV_IS_SLOW(sc) ?
3721 ECORE_MAX_EMUL_MULTI*(1 + r->func_id) :
3722 ECORE_MAX_MULTICAST*(1 + r->func_id));
3723
3724 data->hdr.offset = offset;
3725 data->hdr.client_id = ECORE_CPU_TO_LE16(0xff);
3726 data->hdr.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3727 (ECORE_FILTER_MCAST_PENDING <<
3728 ECORE_SWCID_SHIFT));
3729 data->hdr.length = len;
3730 }
3731
3732 /**
3733 * ecore_mcast_handle_restore_cmd_e1 - restore command for 57710
3734 *
3735 * @sc: device handle
3736 * @o:
3737 * @start_idx: index in the registry to start from
3738 * @rdata_idx: index in the ramrod data to start from
3739 *
3740 * The restore command for 57710 is, like all other commands, always a
3741 * standalone command - start_idx and rdata_idx will always be 0. This
3742 * function always succeeds.
3743 * Returns -1 to comply with the 57712 variant.
3744 */
3745 static inline int ecore_mcast_handle_restore_cmd_e1(
3746 struct bxe_softc *sc, struct ecore_mcast_obj *o , int start_idx,
3747 int *rdata_idx)
3748 {
3749 struct ecore_mcast_mac_elem *elem;
3750 int i = 0;
3751 union ecore_mcast_config_data cfg_data = {NULL};
3752
3753 /* go through the registry and configure the MACs from it. */
3754 ECORE_LIST_FOR_EACH_ENTRY(elem, &o->registry.exact_match.macs, link,
3755 struct ecore_mcast_mac_elem) {
3756 cfg_data.mac = &elem->mac[0];
3757 o->set_one_rule(sc, o, i, &cfg_data, ECORE_MCAST_CMD_RESTORE);
3758
3759 i++;
3760
3761 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3762 cfg_data.mac[0], cfg_data.mac[1], cfg_data.mac[2], cfg_data.mac[3], cfg_data.mac[4], cfg_data.mac[5]);
3763 }
3764
3765 *rdata_idx = i;
3766
3767 return -1;
3768 }
3769
3770 static inline int ecore_mcast_handle_pending_cmds_e1(
3771 struct bxe_softc *sc, struct ecore_mcast_ramrod_params *p)
3772 {
3773 struct ecore_pending_mcast_cmd *cmd_pos;
3774 struct ecore_mcast_mac_elem *pmac_pos;
3775 struct ecore_mcast_obj *o = p->mcast_obj;
3776 union ecore_mcast_config_data cfg_data = {NULL};
3777 int cnt = 0;
3778
3779 /* If nothing to be done - return */
3780 if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
3781 return 0;
3782
3783 /* Handle the first command */
3784 cmd_pos = ECORE_LIST_FIRST_ENTRY(&o->pending_cmds_head,
3785 struct ecore_pending_mcast_cmd, link);
3786
3787 switch (cmd_pos->type) {
3788 case ECORE_MCAST_CMD_ADD:
3789 ECORE_LIST_FOR_EACH_ENTRY(pmac_pos, &cmd_pos->data.macs_head,
3790 link, struct ecore_mcast_mac_elem) {
3791 cfg_data.mac = &pmac_pos->mac[0];
3792 o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
3793
3794 cnt++;
3795
3796 ECORE_MSG(sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC\n",
3797 pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2], pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
3798 }
3799 break;
3800
3801 case ECORE_MCAST_CMD_DEL:
3802 cnt = cmd_pos->data.macs_num;
3803 ECORE_MSG(sc, "About to delete %d multicast MACs\n", cnt);
3804 break;
3805
3806 case ECORE_MCAST_CMD_RESTORE:
3807 o->hdl_restore(sc, o, 0, &cnt);
3808 break;
3809
3810 default:
3811 ECORE_ERR("Unknown command: %d\n", cmd_pos->type);
3812 return ECORE_INVAL;
3813 }
3814
3815 ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link, &o->pending_cmds_head);
3816 ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
3817
3818 return cnt;
3819 }
3820
3821 /**
3822 * ecore_get_fw_mac_addr - revert the ecore_set_fw_mac_addr().
3823 *
3824 * @fw_hi: high 16 bits of the MAC address in FW format
3825 * @fw_mid: middle 16 bits of the MAC address in FW format
3826 * @fw_lo: low 16 bits of the MAC address in FW format
3827 * @mac: buffer to receive the 6-byte MAC address
3828 */
3829 static inline void ecore_get_fw_mac_addr(uint16_t *fw_hi, uint16_t *fw_mid,
3830 uint16_t *fw_lo, uint8_t *mac)
3831 {
3832 mac[1] = ((uint8_t *)fw_hi)[0];
3833 mac[0] = ((uint8_t *)fw_hi)[1];
3834 mac[3] = ((uint8_t *)fw_mid)[0];
3835 mac[2] = ((uint8_t *)fw_mid)[1];
3836 mac[5] = ((uint8_t *)fw_lo)[0];
3837 mac[4] = ((uint8_t *)fw_lo)[1];
3838 }
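/* Illustrative example: if the two bytes of *fw_hi are {0x00, 0x11} in
 * memory, the assignments above give mac[0] == 0x11 and mac[1] == 0x00;
 * fw_mid and fw_lo fill mac[2..3] and mac[4..5] the same way, undoing the
 * swizzling performed by ecore_set_fw_mac_addr().
 */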
3839
3840 /**
3841 * ecore_mcast_refresh_registry_e1 -
3842 *
3843 * @sc: device handle
3844 * @o: multicast object
3845 *
3846 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3847 * and update the registry correspondingly: if ADD - allocate memory and add
3848 * the entries to the registry (list), if DELETE - clear the registry and free
3849 * the memory.
3850 */
3851 static inline int ecore_mcast_refresh_registry_e1(struct bxe_softc *sc,
3852 struct ecore_mcast_obj *o)
3853 {
3854 struct ecore_raw_obj *raw = &o->raw;
3855 struct ecore_mcast_mac_elem *elem;
3856 struct mac_configuration_cmd *data =
3857 (struct mac_configuration_cmd *)(raw->rdata);
3858
3859 /* If first entry contains a SET bit - the command was ADD,
3860 * otherwise - DEL_ALL
3861 */
3862 if (ECORE_GET_FLAG(data->config_table[0].flags,
3863 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3864 int i, len = data->hdr.length;
3865
3866 /* Break if it was a RESTORE command */
3867 if (!ECORE_LIST_IS_EMPTY(&o->registry.exact_match.macs))
3868 return ECORE_SUCCESS;
3869
3870 elem = ECORE_CALLOC(len, sizeof(*elem), GFP_ATOMIC, sc);
3871 if (!elem) {
3872 ECORE_ERR("Failed to allocate registry memory\n");
3873 return ECORE_NOMEM;
3874 }
3875
3876 for (i = 0; i < len; i++, elem++) {
3877 ecore_get_fw_mac_addr(
3878 &data->config_table[i].msb_mac_addr,
3879 &data->config_table[i].middle_mac_addr,
3880 &data->config_table[i].lsb_mac_addr,
3881 elem->mac);
3882 ECORE_MSG(sc, "Adding registry entry for [%02x:%02x:%02x:%02x:%02x:%02x]\n",
3883 elem->mac[0], elem->mac[1], elem->mac[2], elem->mac[3], elem->mac[4], elem->mac[5]);
3884 ECORE_LIST_PUSH_TAIL(&elem->link,
3885 &o->registry.exact_match.macs);
3886 }
3887 } else {
3888 elem = ECORE_LIST_FIRST_ENTRY(&o->registry.exact_match.macs,
3889 struct ecore_mcast_mac_elem,
3890 link);
3891 ECORE_MSG(sc, "Deleting a registry\n");
3892 ECORE_FREE(sc, elem, sizeof(*elem));
3893 ECORE_LIST_INIT(&o->registry.exact_match.macs);
3894 }
3895
3896 return ECORE_SUCCESS;
3897 }
3898
3899 static int ecore_mcast_setup_e1(struct bxe_softc *sc,
3900 struct ecore_mcast_ramrod_params *p,
3901 enum ecore_mcast_cmd cmd)
3902 {
3903 struct ecore_mcast_obj *o = p->mcast_obj;
3904 struct ecore_raw_obj *raw = &o->raw;
3905 struct mac_configuration_cmd *data =
3906 (struct mac_configuration_cmd *)(raw->rdata);
3907 int cnt = 0, i, rc;
3908
3909 /* Reset the ramrod data buffer */
3910 ECORE_MEMSET(data, 0, sizeof(*data));
3911
3912 /* First set all entries as invalid */
3913 for (i = 0; i < o->max_cmd_len ; i++)
3914 ECORE_SET_FLAG(data->config_table[i].flags,
3915 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3916 T_ETH_MAC_COMMAND_INVALIDATE);
3917
3918 /* Handle pending commands first */
3919 cnt = ecore_mcast_handle_pending_cmds_e1(sc, p);
3920
3921 /* If there are no more pending commands - clear SCHEDULED state */
3922 if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
3923 o->clear_sched(o);
3924
3925 /* The below may be TRUE iff there were no pending commands */
3926 if (!cnt)
3927 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, 0);
3928
3929 /* For 57710 every command has o->max_cmd_len length to ensure that
3930 * commands are done one at a time.
3931 */
3932 o->total_pending_num -= o->max_cmd_len;
3933
3934 /* send a ramrod */
3935
3936 ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
3937
3938 /* Set ramrod header (in particular, a number of entries to update) */
3939 ecore_mcast_set_rdata_hdr_e1(sc, p, (uint8_t)cnt);
3940
3941 /* update a registry: we need the registry contents to be always up
3942 * to date in order to be able to execute a RESTORE opcode. Here
3943 * we use the fact that for 57710 we sent one command at a time
3944 * hence we may take the registry update out of the command handling
3945 * and do it in a simpler way here.
3946 */
3947 rc = ecore_mcast_refresh_registry_e1(sc, o);
3948 if (rc)
3949 return rc;
3950
3951 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
3952 * RAMROD_PENDING status immediately.
3953 */
3954 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3955 raw->clear_pending(raw);
3956 return ECORE_SUCCESS;
3957 } else {
3958 /* No need for an explicit memory barrier here as long as we
3959 * ensure the ordering of writing to the SPQ element
3960 * and updating of the SPQ producer which involves a memory
3961 * read. If the memory read is removed we will have to put a
3962 * full memory barrier there (inside ecore_sp_post()).
3963 */
3964
3965 /* Send a ramrod */
3966 rc = ecore_sp_post( sc,
3967 RAMROD_CMD_ID_ETH_SET_MAC,
3968 raw->cid,
3969 raw->rdata_mapping,
3970 ETH_CONNECTION_TYPE);
3971 if (rc)
3972 return rc;
3973
3974 /* Ramrod completion is pending */
3975 return ECORE_PENDING;
3976 }
3977 }
3978
3979 static int ecore_mcast_get_registry_size_exact(struct ecore_mcast_obj *o)
3980 {
3981 return o->registry.exact_match.num_macs_set;
3982 }
3983
3984 static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)
3985 {
3986 return o->registry.aprox_match.num_bins_set;
3987 }
3988
3989 static void ecore_mcast_set_registry_size_exact(struct ecore_mcast_obj *o,
3990 int n)
3991 {
3992 o->registry.exact_match.num_macs_set = n;
3993 }
3994
3995 static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,
3996 int n)
3997 {
3998 o->registry.aprox_match.num_bins_set = n;
3999 }
4000
4001 int ecore_config_mcast(struct bxe_softc *sc,
4002 struct ecore_mcast_ramrod_params *p,
4003 enum ecore_mcast_cmd cmd)
4004 {
4005 struct ecore_mcast_obj *o = p->mcast_obj;
4006 struct ecore_raw_obj *r = &o->raw;
4007 int rc = 0, old_reg_size;
4008
4009 /* This is needed to recover number of currently configured mcast macs
4010 * in case of failure.
4011 */
4012 old_reg_size = o->get_registry_size(o);
4013
4014 /* Do some calculations and checks */
4015 rc = o->validate(sc, p, cmd);
4016 if (rc)
4017 return rc;
4018
4019 /* Return if there is no work to do */
4020 if ((!p->mcast_list_len) && (!o->check_sched(o)))
4021 return ECORE_SUCCESS;
4022
4023 ECORE_MSG(sc, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
4024 o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
4025
4026 /* Enqueue the current command to the pending list if we can't complete
4027 * it in the current iteration
4028 */
4029 if (r->check_pending(r) ||
4030 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
4031 rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd);
4032 if (rc < 0)
4033 goto error_exit1;
4034
4035 /* As long as the current command is in a command list we
4036 * don't need to handle it separately.
4037 */
4038 p->mcast_list_len = 0;
4039 }
4040
4041 if (!r->check_pending(r)) {
4042
4043 /* Set 'pending' state */
4044 r->set_pending(r);
4045
4046 /* Configure the new classification in the chip */
4047 rc = o->config_mcast(sc, p, cmd);
4048 if (rc < 0)
4049 goto error_exit2;
4050
4051 /* Wait for a ramrod completion if was requested */
4052 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
4053 rc = o->wait_comp(sc, o);
4054 }
4055
4056 return rc;
4057
4058 error_exit2:
4059 r->clear_pending(r);
4060
4061 error_exit1:
4062 o->revert(sc, p, old_reg_size);
4063
4064 return rc;
4065 }
4066
4067 static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o)
4068 {
4069 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
4070 ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate);
4071 ECORE_SMP_MB_AFTER_CLEAR_BIT();
4072 }
4073
4074 static void ecore_mcast_set_sched(struct ecore_mcast_obj *o)
4075 {
4076 ECORE_SMP_MB_BEFORE_CLEAR_BIT();
4077 ECORE_SET_BIT(o->sched_state, o->raw.pstate);
4078 ECORE_SMP_MB_AFTER_CLEAR_BIT();
4079 }
4080
4081 static bool ecore_mcast_check_sched(struct ecore_mcast_obj *o)
4082 {
4083 return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate);
4084 }
4085
4086 static bool ecore_mcast_check_pending(struct ecore_mcast_obj *o)
4087 {
4088 return o->raw.check_pending(&o->raw) || o->check_sched(o);
4089 }
4090
4091 void ecore_init_mcast_obj(struct bxe_softc *sc,
4092 struct ecore_mcast_obj *mcast_obj,
4093 uint8_t mcast_cl_id, uint32_t mcast_cid, uint8_t func_id,
4094 uint8_t engine_id, void *rdata, ecore_dma_addr_t rdata_mapping,
4095 int state, unsigned long *pstate, ecore_obj_type type)
4096 {
4097 ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj));
4098
4099 ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
4100 rdata, rdata_mapping, state, pstate, type);
4101
4102 mcast_obj->engine_id = engine_id;
4103
4104 ECORE_LIST_INIT(&mcast_obj->pending_cmds_head);
4105
4106 mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED;
4107 mcast_obj->check_sched = ecore_mcast_check_sched;
4108 mcast_obj->set_sched = ecore_mcast_set_sched;
4109 mcast_obj->clear_sched = ecore_mcast_clear_sched;
4110
4111 if (CHIP_IS_E1(sc)) {
4112 mcast_obj->config_mcast = ecore_mcast_setup_e1;
4113 mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd;
4114 mcast_obj->hdl_restore =
4115 ecore_mcast_handle_restore_cmd_e1;
4116 mcast_obj->check_pending = ecore_mcast_check_pending;
4117
4118 if (CHIP_REV_IS_SLOW(sc))
4119 mcast_obj->max_cmd_len = ECORE_MAX_EMUL_MULTI;
4120 else
4121 mcast_obj->max_cmd_len = ECORE_MAX_MULTICAST;
4122
4123 mcast_obj->wait_comp = ecore_mcast_wait;
4124 mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e1;
4125 mcast_obj->validate = ecore_mcast_validate_e1;
4126 mcast_obj->revert = ecore_mcast_revert_e1;
4127 mcast_obj->get_registry_size =
4128 ecore_mcast_get_registry_size_exact;
4129 mcast_obj->set_registry_size =
4130 ecore_mcast_set_registry_size_exact;
4131
4132 /* 57710 is the only chip that uses the exact match for mcast
4133 * at the moment.
4134 */
4135 ECORE_LIST_INIT(&mcast_obj->registry.exact_match.macs);
4136
4137 } else if (CHIP_IS_E1H(sc)) {
4138 mcast_obj->config_mcast = ecore_mcast_setup_e1h;
4139 mcast_obj->enqueue_cmd = NULL;
4140 mcast_obj->hdl_restore = NULL;
4141 mcast_obj->check_pending = ecore_mcast_check_pending;
4142
4143 /* 57711 doesn't send a ramrod, so it has unlimited credit
4144 * for one command.
4145 */
4146 mcast_obj->max_cmd_len = -1;
4147 mcast_obj->wait_comp = ecore_mcast_wait;
4148 mcast_obj->set_one_rule = NULL;
4149 mcast_obj->validate = ecore_mcast_validate_e1h;
4150 mcast_obj->revert = ecore_mcast_revert_e1h;
4151 mcast_obj->get_registry_size =
4152 ecore_mcast_get_registry_size_aprox;
4153 mcast_obj->set_registry_size =
4154 ecore_mcast_set_registry_size_aprox;
4155 } else {
4156 mcast_obj->config_mcast = ecore_mcast_setup_e2;
4157 mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd;
4158 mcast_obj->hdl_restore =
4159 ecore_mcast_handle_restore_cmd_e2;
4160 mcast_obj->check_pending = ecore_mcast_check_pending;
4161 /* TODO: There should be a proper HSI define for this number!!!
4162 */
4163 mcast_obj->max_cmd_len = 16;
4164 mcast_obj->wait_comp = ecore_mcast_wait;
4165 mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e2;
4166 mcast_obj->validate = ecore_mcast_validate_e2;
4167 mcast_obj->revert = ecore_mcast_revert_e2;
4168 mcast_obj->get_registry_size =
4169 ecore_mcast_get_registry_size_aprox;
4170 mcast_obj->set_registry_size =
4171 ecore_mcast_set_registry_size_aprox;
4172 }
4173 }
4174
4175 /*************************** Credit handling **********************************/
4176
4177 /**
4178 * atomic_add_ifless - add if the result is less than a given value.
4179 *
4180 * @v: pointer of type ecore_atomic_t
4181 * @a: the amount to add to v...
4182 * @u: ...if (v + a) is less than u.
4183 *
4184 * returns TRUE if (v + a) was less than u, and FALSE otherwise.
4185 *
4186 */
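/* Illustrative example: with *v == 5, a == 2 and u == 8 the compare-and-swap
 * loop below stores 7 and returns TRUE (5 + 2 < 8); with u == 7 it leaves
 * *v untouched and returns FALSE, since 5 + 2 >= 7.
 */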
4187 static inline bool __atomic_add_ifless(ecore_atomic_t *v, int a, int u)
4188 {
4189 int c, old;
4190
4191 c = ECORE_ATOMIC_READ(v);
4192 for (;;) {
4193 if (ECORE_UNLIKELY(c + a >= u))
4194 return FALSE;
4195
4196 old = ECORE_ATOMIC_CMPXCHG((v), c, c + a);
4197 if (ECORE_LIKELY(old == c))
4198 break;
4199 c = old;
4200 }
4201
4202 return TRUE;
4203 }
4204
4205 /**
4206 * atomic_dec_ifmoe - dec if the result is greater than or equal to a given value.
4207 *
4208 * @v: pointer of type ecore_atomic_t
4209 * @a: the amount to dec from v...
4210 * @u: ...if (v - a) is greater than or equal to u.
4211 *
4212 * returns TRUE if (v - a) was greater than or equal to u, and FALSE
4213 * otherwise.
4214 */
4215 static inline bool __atomic_dec_ifmoe(ecore_atomic_t *v, int a, int u)
4216 {
4217 int c, old;
4218
4219 c = ECORE_ATOMIC_READ(v);
4220 for (;;) {
4221 if (ECORE_UNLIKELY(c - a < u))
4222 return FALSE;
4223
4224 old = ECORE_ATOMIC_CMPXCHG((v), c, c - a);
4225 if (ECORE_LIKELY(old == c))
4226 break;
4227 c = old;
4228 }
4229
4230 return TRUE;
4231 }
4232
4233 static bool ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt)
4234 {
4235 bool rc;
4236
4237 ECORE_SMP_MB();
4238 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
4239 ECORE_SMP_MB();
4240
4241 return rc;
4242 }
4243
4244 static bool ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt)
4245 {
4246 bool rc;
4247
4248 ECORE_SMP_MB();
4249
4250 /* Don't allow a refill if credit + cnt > pool_sz */
4251 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
4252
4253 ECORE_SMP_MB();
4254
4255 return rc;
4256 }
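/* Illustrative example of the two helpers above: with pool_sz == 8 and
 * credit currently 2, ecore_credit_pool_get(o, 3) fails (2 - 3 would drop
 * below 0) while ecore_credit_pool_get(o, 2) succeeds; ecore_credit_pool_put(o, 7)
 * fails (2 + 7 would exceed pool_sz) while ecore_credit_pool_put(o, 6)
 * refills the pool back to its full size of 8.
 */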
4257
4258 static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o)
4259 {
4260 int cur_credit;
4261
4262 ECORE_SMP_MB();
4263 cur_credit = ECORE_ATOMIC_READ(&o->credit);
4264
4265 return cur_credit;
4266 }
4267
4268 static bool ecore_credit_pool_always_TRUE(struct ecore_credit_pool_obj *o,
4269 int cnt)
4270 {
4271 return TRUE;
4272 }
4273
4274 static bool ecore_credit_pool_get_entry(
4275 struct ecore_credit_pool_obj *o,
4276 int *offset)
4277 {
4278 int idx, vec, i;
4279
4280 *offset = -1;
4281
4282 /* Find "internal cam-offset" then add to base for this object... */
4283 for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) {
4284
4285 /* Skip the current vector if there are no free entries in it */
4286 if (!o->pool_mirror[vec])
4287 continue;
4288
4289 /* If we've got here we are going to find a free entry */
4290 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
4291 i < BIT_VEC64_ELEM_SZ; idx++, i++)
4292
4293 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
4294 /* Got one!! */
4295 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
4296 *offset = o->base_pool_offset + idx;
4297 return TRUE;
4298 }
4299 }
4300
4301 return FALSE;
4302 }
4303
4304 static bool ecore_credit_pool_put_entry(
4305 struct ecore_credit_pool_obj *o,
4306 int offset)
4307 {
4308 if (offset < o->base_pool_offset)
4309 return FALSE;
4310
4311 offset -= o->base_pool_offset;
4312
4313 if (offset >= o->pool_sz)
4314 return FALSE;
4315
4316 /* Return the entry to the pool */
4317 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
4318
4319 return TRUE;
4320 }
4321
4322 static bool ecore_credit_pool_put_entry_always_TRUE(
4323 struct ecore_credit_pool_obj *o,
4324 int offset)
4325 {
4326 return TRUE;
4327 }
4328
4329 static bool ecore_credit_pool_get_entry_always_TRUE(
4330 struct ecore_credit_pool_obj *o,
4331 int *offset)
4332 {
4333 *offset = -1;
4334 return TRUE;
4335 }
4336 /**
4337 * ecore_init_credit_pool - initialize credit pool internals.
4338 *
4339 * @p:
4340 * @base: Base entry in the CAM to use.
4341 * @credit: pool size.
4342 *
4343 * If base is negative, no CAM entry handling will be performed.
4344 * If credit is negative, pool operations will always succeed (unlimited pool).
4345 *
4346 */
4347 void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
4348 int base, int credit)
4349 {
4350 /* Zero the object first */
4351 ECORE_MEMSET(p, 0, sizeof(*p));
4352
4353 /* Set the table to all 1s */
4354 ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
4355
4356 /* Init a pool as full */
4357 ECORE_ATOMIC_SET(&p->credit, credit);
4358
4359 /* The total pool size */
4360 p->pool_sz = credit;
4361
4362 p->base_pool_offset = base;
4363
4364 /* Commit the change */
4365 ECORE_SMP_MB();
4366
4367 p->check = ecore_credit_pool_check;
4368
4369 /* if pool credit is negative - disable the checks */
4370 if (credit >= 0) {
4371 p->put = ecore_credit_pool_put;
4372 p->get = ecore_credit_pool_get;
4373 p->put_entry = ecore_credit_pool_put_entry;
4374 p->get_entry = ecore_credit_pool_get_entry;
4375 } else {
4376 p->put = ecore_credit_pool_always_TRUE;
4377 p->get = ecore_credit_pool_always_TRUE;
4378 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
4379 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
4380 }
4381
4382 /* If base is negative - disable entries handling */
4383 if (base < 0) {
4384 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
4385 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
4386 }
4387 }
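/* A minimal usage sketch of the initializer above (hypothetical values,
 * for illustration only):
 *
 *	struct ecore_credit_pool_obj pool;
 *
 *	ecore_init_credit_pool(&pool, 0, 16);	(16 credits, CAM entries 0..15)
 *	ecore_init_credit_pool(&pool, -1, 16);	(16 credits, no CAM entry handling)
 *	ecore_init_credit_pool(&pool, 0, -1);	(unlimited pool, checks disabled)
 */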
4388
4389 void ecore_init_mac_credit_pool(struct bxe_softc *sc,
4390 struct ecore_credit_pool_obj *p, uint8_t func_id,
4391 uint8_t func_num)
4392 {
4393 /* TODO: this will be defined in consts as well... */
4394 #define ECORE_CAM_SIZE_EMUL 5
4395
4396 int cam_sz;
4397
4398 if (CHIP_IS_E1(sc)) {
4399 /* In E1, multicast MACs are saved in the CAM... */
4400 if (!CHIP_REV_IS_SLOW(sc))
4401 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - ECORE_MAX_MULTICAST;
4402 else
4403 cam_sz = ECORE_CAM_SIZE_EMUL - ECORE_MAX_EMUL_MULTI;
4404
4405 ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
4406
4407 } else if (CHIP_IS_E1H(sc)) {
4408 /* CAM credit is equally divided between all active functions
4409 * on the PORT.
4410 */
4411 if ((func_num > 0)) {
4412 if (!CHIP_REV_IS_SLOW(sc))
4413 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
4414 else
4415 cam_sz = ECORE_CAM_SIZE_EMUL;
4416 ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
4417 } else {
4418 /* this should never happen! Block MAC operations. */
4419 ecore_init_credit_pool(p, 0, 0);
4420 }
4421 } else {
4422 /*
4423 * CAM credit is equally divided between all active functions
4424 * on the PATH.
4425 */
4426 if (func_num > 0) {
4427 if (!CHIP_REV_IS_SLOW(sc))
4428 cam_sz = PF_MAC_CREDIT_E2(sc, func_num);
4429 else
4430 cam_sz = ECORE_CAM_SIZE_EMUL;
4431
4432 /* No need for CAM entries handling for 57712 and
4433 * newer.
4434 */
4435 ecore_init_credit_pool(p, -1, cam_sz);
4436 } else {
4437 /* this should never happen! Block MAC operations. */
4438 ecore_init_credit_pool(p, 0, 0);
4439 }
4440 }
4441 }
4442
4443 void ecore_init_vlan_credit_pool(struct bxe_softc *sc,
4444 struct ecore_credit_pool_obj *p,
4445 uint8_t func_id,
4446 uint8_t func_num)
4447 {
4448 if (CHIP_IS_E1x(sc)) {
4449 /* There is no VLAN credit in HW on 57710 and 57711; only
4450 * MAC / MAC-VLAN can be set.
4451 */
4452 ecore_init_credit_pool(p, 0, -1);
4453 } else {
4454 /* CAM credit is equally divided between all active functions
4455 * on the PATH.
4456 */
4457 if (func_num > 0) {
4458 int credit = PF_VLAN_CREDIT_E2(sc, func_num);
4459
4460 ecore_init_credit_pool(p, -1/*unused for E2*/, credit);
4461 } else
4462 /* this should never happen! Block VLAN operations. */
4463 ecore_init_credit_pool(p, 0, 0);
4464 }
4465 }
4466
4467 /****************** RSS Configuration ******************/
4468
4469 /**
4470 * ecore_setup_rss - configure RSS
4471 *
4472 * @sc: device handle
4473 * @p: rss configuration
4474 *
4475 * Sends an RSS UPDATE ramrod for that matter.
4476 */
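/* A minimal usage sketch (hypothetical flag selection, for illustration
 * only): a caller fills ecore_config_rss_params and goes through
 * ecore_config_rss(), which reaches this routine via o->config_rss:
 *
 *	struct ecore_config_rss_params params = { 0 };
 *
 *	params.rss_obj = rss_obj;
 *	ECORE_SET_BIT(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
 *	ECORE_SET_BIT(ECORE_RSS_IPV4, &params.rss_flags);
 *	ECORE_SET_BIT(ECORE_RSS_IPV4_TCP, &params.rss_flags);
 *	rc = ecore_config_rss(sc, &params);
 *
 * (rss_result_mask, rss_key and ind_table would normally be filled in as
 * well; they are omitted here for brevity.)
 */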
4477 static int ecore_setup_rss(struct bxe_softc *sc,
4478 struct ecore_config_rss_params *p)
4479 {
4480 struct ecore_rss_config_obj *o = p->rss_obj;
4481 struct ecore_raw_obj *r = &o->raw;
4482 struct eth_rss_update_ramrod_data *data =
4483 (struct eth_rss_update_ramrod_data *)(r->rdata);
4484 uint16_t caps = 0;
4485 uint8_t rss_mode = 0;
4486 int rc;
4487
4488 ECORE_MEMSET(data, 0, sizeof(*data));
4489
4490 ECORE_MSG(sc, "Configuring RSS\n");
4491
4492 /* Set an echo field */
4493 data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
4494 (r->state << ECORE_SWCID_SHIFT));
4495
4496 /* RSS mode */
4497 if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags))
4498 rss_mode = ETH_RSS_MODE_DISABLED;
4499 else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
4500 rss_mode = ETH_RSS_MODE_REGULAR;
4501
4502 data->rss_mode = rss_mode;
4503
4504 ECORE_MSG(sc, "rss_mode=%d\n", rss_mode);
4505
4506 /* RSS capabilities */
4507 if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
4508 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4509
4510 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
4511 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4512
4513 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
4514 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
4515
4516 if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
4517 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4518
4519 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
4520 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4521
4522 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
4523 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
4524
4525 if (ECORE_TEST_BIT(ECORE_RSS_IPV4_VXLAN, &p->rss_flags))
4526 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY;
4527
4528 if (ECORE_TEST_BIT(ECORE_RSS_IPV6_VXLAN, &p->rss_flags))
4529 caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY;
4530
4531 if (ECORE_TEST_BIT(ECORE_RSS_TUNN_INNER_HDRS, &p->rss_flags))
4532 caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY;
4533
4534 /* RSS keys */
4535 if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
4536 ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
4537 sizeof(data->rss_key));
4538 caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4539 }
4540
4541 data->capabilities = ECORE_CPU_TO_LE16(caps);
4542
4543 /* Hashing mask */
4544 data->rss_result_mask = p->rss_result_mask;
4545
4546 /* RSS engine ID */
4547 data->rss_engine_id = o->engine_id;
4548
4549 ECORE_MSG(sc, "rss_engine_id=%d\n", data->rss_engine_id);
4550
4551 /* Indirection table */
4552 ECORE_MEMCPY(data->indirection_table, p->ind_table,
4553 T_ETH_INDIRECTION_TABLE_SIZE);
4554
4555 /* Remember the last configuration */
4556 ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4557
4558
4559 /* No need for an explicit memory barrier here as long as we
4560 * ensure the ordering of writing to the SPQ element
4561 * and updating of the SPQ producer which involves a memory
4562 * read. If the memory read is removed we will have to put a
4563 * full memory barrier there (inside ecore_sp_post()).
4564 */
4565
4566 /* Send a ramrod */
4567 rc = ecore_sp_post(sc,
4568 RAMROD_CMD_ID_ETH_RSS_UPDATE,
4569 r->cid,
4570 r->rdata_mapping,
4571 ETH_CONNECTION_TYPE);
4572
4573 if (rc < 0)
4574 return rc;
4575
4576 return ECORE_PENDING;
4577 }
4578
4579 void ecore_get_rss_ind_table(struct ecore_rss_config_obj *rss_obj,
4580 uint8_t *ind_table)
4581 {
4582 ECORE_MEMCPY(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4583 }
4584
4585 int ecore_config_rss(struct bxe_softc *sc,
4586 struct ecore_config_rss_params *p)
4587 {
4588 int rc;
4589 struct ecore_rss_config_obj *o = p->rss_obj;
4590 struct ecore_raw_obj *r = &o->raw;
4591
4592 /* Do nothing if only driver cleanup was requested */
4593 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
4594 ECORE_MSG(sc, "Not configuring RSS ramrod_flags=%lx\n",
4595 p->ramrod_flags);
4596 return ECORE_SUCCESS;
4597 }
4598
4599 r->set_pending(r);
4600
4601 rc = o->config_rss(sc, p);
4602 if (rc < 0) {
4603 r->clear_pending(r);
4604 return rc;
4605 }
4606
4607 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
4608 rc = r->wait_comp(sc, r);
4609
4610 return rc;
4611 }
4612
4613 void ecore_init_rss_config_obj(struct bxe_softc *sc,
4614 struct ecore_rss_config_obj *rss_obj,
4615 uint8_t cl_id, uint32_t cid, uint8_t func_id, uint8_t engine_id,
4616 void *rdata, ecore_dma_addr_t rdata_mapping,
4617 int state, unsigned long *pstate,
4618 ecore_obj_type type)
4619 {
4620 ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4621 rdata_mapping, state, pstate, type);
4622
4623 rss_obj->engine_id = engine_id;
4624 rss_obj->config_rss = ecore_setup_rss;
4625 }
4626
4627
4628 /********************** Queue state object ***********************************/
4629
4630 /**
4631 * ecore_queue_state_change - perform Queue state change transition
4632 *
4633 * @sc: device handle
4634 * @params: parameters to perform the transition
4635 *
4636 * returns 0 in case of successfully completed transition, negative error
4637 * code in case of failure, positive (EBUSY) value if there is a completion
4638 * that is still pending (possible only if RAMROD_COMP_WAIT is
4639 * not set in params->ramrod_flags for asynchronous commands).
4640 *
4641 */
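/* A minimal usage sketch (illustration only; the command and its
 * command-specific parameters depend on the transition being requested):
 *
 *	struct ecore_queue_state_params params = { 0 };
 *
 *	params.q_obj = q_obj;
 *	params.cmd = ECORE_Q_CMD_ACTIVATE;
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);
 *	rc = ecore_queue_state_change(sc, &params);
 */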
4642 int ecore_queue_state_change(struct bxe_softc *sc,
4643 struct ecore_queue_state_params *params)
4644 {
4645 struct ecore_queue_sp_obj *o = params->q_obj;
4646 int rc, pending_bit;
4647 unsigned long *pending = &o->pending;
4648
4649 /* Check that the requested transition is legal */
4650 rc = o->check_transition(sc, o, params);
4651 if (rc) {
4652 ECORE_ERR("check transition returned an error. rc %d\n", rc);
4653 return ECORE_INVAL;
4654 }
4655
4656 /* Set "pending" bit */
4657 ECORE_MSG(sc, "pending bit was=%lx\n", o->pending);
4658 pending_bit = o->set_pending(o, params);
4659 ECORE_MSG(sc, "pending bit now=%lx\n", o->pending);
4660
4661 /* Don't send a command if only driver cleanup was requested */
4662 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4663 o->complete_cmd(sc, o, pending_bit);
4664 else {
4665 /* Send a ramrod */
4666 rc = o->send_cmd(sc, params);
4667 if (rc) {
4668 o->next_state = ECORE_Q_STATE_MAX;
4669 ECORE_CLEAR_BIT(pending_bit, pending);
4670 ECORE_SMP_MB_AFTER_CLEAR_BIT();
4671 return rc;
4672 }
4673
4674 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4675 rc = o->wait_comp(sc, o, pending_bit);
4676 if (rc)
4677 return rc;
4678
4679 return ECORE_SUCCESS;
4680 }
4681 }
4682
4683 return ECORE_RET_PENDING(pending_bit, pending);
4684 }
4685
4686 static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj,
4687 struct ecore_queue_state_params *params)
4688 {
4689 enum ecore_queue_cmd cmd = params->cmd, bit;
4690
4691 /* ACTIVATE and DEACTIVATE commands are implemented on top of
4692 * UPDATE command.
4693 */
4694 if ((cmd == ECORE_Q_CMD_ACTIVATE) ||
4695 (cmd == ECORE_Q_CMD_DEACTIVATE))
4696 bit = ECORE_Q_CMD_UPDATE;
4697 else
4698 bit = cmd;
4699
4700 ECORE_SET_BIT(bit, &obj->pending);
4701 return bit;
4702 }
4703
4704 static int ecore_queue_wait_comp(struct bxe_softc *sc,
4705 struct ecore_queue_sp_obj *o,
4706 enum ecore_queue_cmd cmd)
4707 {
4708 return ecore_state_wait(sc, cmd, &o->pending);
4709 }
4710
4711 /**
4712 * ecore_queue_comp_cmd - complete the state change command.
4713 *
4714 * @sc: device handle
4715 * @o:
4716 * @cmd:
4717 *
4718 * Checks that the arrived completion is expected.
4719 */
4720 static int ecore_queue_comp_cmd(struct bxe_softc *sc,
4721 struct ecore_queue_sp_obj *o,
4722 enum ecore_queue_cmd cmd)
4723 {
4724 unsigned long cur_pending = o->pending;
4725
4726 if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
4727 ECORE_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
4728 cmd, o->cids[ECORE_PRIMARY_CID_INDEX],
4729 o->state, cur_pending, o->next_state);
4730 return ECORE_INVAL;
4731 }
4732
4733 if (o->next_tx_only >= o->max_cos)
4734 /* >= because tx only must always be smaller than cos since the
4735 * primary connection supports COS 0
4736 */
4737 ECORE_ERR("illegal value for next tx_only: %d. max cos was %d",
4738 o->next_tx_only, o->max_cos);
4739
4740 ECORE_MSG(sc,
4741 "Completing command %d for queue %d, setting state to %d\n",
4742 cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);
4743
4744 if (o->next_tx_only) /* print num tx-only if any exist */
4745 ECORE_MSG(sc, "primary cid %d: num tx-only cons %d\n",
4746 o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);
4747
4748 o->state = o->next_state;
4749 o->num_tx_only = o->next_tx_only;
4750 o->next_state = ECORE_Q_STATE_MAX;
4751
4752 /* It's important that o->state and o->next_state are
4753 * updated before o->pending.
4754 */
4755 wmb();
4756
4757 ECORE_CLEAR_BIT(cmd, &o->pending);
4758 ECORE_SMP_MB_AFTER_CLEAR_BIT();
4759
4760 return ECORE_SUCCESS;
4761 }
4762
4763 static void ecore_q_fill_setup_data_e2(struct bxe_softc *sc,
4764 struct ecore_queue_state_params *cmd_params,
4765 struct client_init_ramrod_data *data)
4766 {
4767 struct ecore_queue_setup_params *params = &cmd_params->params.setup;
4768
4769 /* Rx data */
4770
4771 /* IPv6 TPA supported for E2 and above only */
4772 data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6,
4773 &params->flags) *
4774 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4775 }
4776
4777 static void ecore_q_fill_init_general_data(struct bxe_softc *sc,
4778 struct ecore_queue_sp_obj *o,
4779 struct ecore_general_setup_params *params,
4780 struct client_init_general_data *gen_data,
4781 unsigned long *flags)
4782 {
4783 gen_data->client_id = o->cl_id;
4784
4785 if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) {
4786 gen_data->statistics_counter_id =
4787 params->stat_id;
4788 gen_data->statistics_en_flg = 1;
4789 gen_data->statistics_zero_flg =
4790 ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags);
4791 } else
4792 gen_data->statistics_counter_id =
4793 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4794
4795 gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE,
4796 flags);
4797 gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4798 flags);
4799 gen_data->sp_client_id = params->spcl_id;
4800 gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu);
4801 gen_data->func_id = o->func_id;
4802
4803 gen_data->cos = params->cos;
4804
4805 gen_data->traffic_type =
4806 ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
4807 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4808
4809 gen_data->fp_hsi_ver = params->fp_hsi;
4810
4811 ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d\n",
4812 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4813 }
4814
4815 static void ecore_q_fill_init_tx_data(struct ecore_queue_sp_obj *o,
4816 struct ecore_txq_setup_params *params,
4817 struct client_init_tx_data *tx_data,
4818 unsigned long *flags)
4819 {
4820 tx_data->enforce_security_flg =
4821 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags);
4822 tx_data->default_vlan =
4823 ECORE_CPU_TO_LE16(params->default_vlan);
4824 tx_data->default_vlan_flg =
4825 ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags);
4826 tx_data->tx_switching_flg =
4827 ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags);
4828 tx_data->anti_spoofing_flg =
4829 ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags);
4830 tx_data->force_default_pri_flg =
4831 ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags);
4832 tx_data->refuse_outband_vlan_flg =
4833 ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
4834 tx_data->tunnel_lso_inc_ip_id =
4835 ECORE_TEST_BIT(ECORE_Q_FLG_TUN_INC_INNER_IP_ID, flags);
4836 tx_data->tunnel_non_lso_pcsum_location =
4837 ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
4838 CSUM_ON_BD;
4839
4840 tx_data->tx_status_block_id = params->fw_sb_id;
4841 tx_data->tx_sb_index_number = params->sb_cq_index;
4842 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4843
4844 tx_data->tx_bd_page_base.lo =
4845 ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
4846 tx_data->tx_bd_page_base.hi =
4847 ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
4848
4849 /* Don't configure any Tx switching mode during queue SETUP */
4850 tx_data->state = 0;
4851 }
4852
4853 static void ecore_q_fill_init_pause_data(struct ecore_queue_sp_obj *o,
4854 struct rxq_pause_params *params,
4855 struct client_init_rx_data *rx_data)
4856 {
4857 /* flow control data */
4858 rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo);
4859 rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi);
4860 rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo);
4861 rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi);
4862 rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo);
4863 rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi);
4864 rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map);
4865 }
4866
4867 static void ecore_q_fill_init_rx_data(struct ecore_queue_sp_obj *o,
4868 struct ecore_rxq_setup_params *params,
4869 struct client_init_rx_data *rx_data,
4870 unsigned long *flags)
4871 {
4872 rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) *
4873 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4874 rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) *
4875 CLIENT_INIT_RX_DATA_TPA_MODE;
4876 rx_data->vmqueue_mode_en_flg = 0;
4877
4878 rx_data->extra_data_over_sgl_en_flg =
4879 ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags);
4880 rx_data->cache_line_alignment_log_size =
4881 params->cache_line_log;
4882 rx_data->enable_dynamic_hc =
4883 ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags);
4884 rx_data->max_sges_for_packet = params->max_sges_pkt;
4885 rx_data->client_qzone_id = params->cl_qzone_id;
4886 rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz);
4887
4888 /* Always start in DROP_ALL mode */
4889 rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4890 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4891
4892 /* We don't set drop flags */
4893 rx_data->drop_ip_cs_err_flg = 0;
4894 rx_data->drop_tcp_cs_err_flg = 0;
4895 rx_data->drop_ttl0_flg = 0;
4896 rx_data->drop_udp_cs_err_flg = 0;
4897 rx_data->inner_vlan_removal_enable_flg =
4898 ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags);
4899 rx_data->outer_vlan_removal_enable_flg =
4900 ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags);
4901 rx_data->status_block_id = params->fw_sb_id;
4902 rx_data->rx_sb_index_number = params->sb_cq_index;
4903 rx_data->max_tpa_queues = params->max_tpa_queues;
4904 rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz);
4905 rx_data->sge_buff_size = ECORE_CPU_TO_LE16(params->sge_buf_sz);
4906 rx_data->bd_page_base.lo =
4907 ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
4908 rx_data->bd_page_base.hi =
4909 ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
4910 rx_data->sge_page_base.lo =
4911 ECORE_CPU_TO_LE32(U64_LO(params->sge_map));
4912 rx_data->sge_page_base.hi =
4913 ECORE_CPU_TO_LE32(U64_HI(params->sge_map));
4914 rx_data->cqe_page_base.lo =
4915 ECORE_CPU_TO_LE32(U64_LO(params->rcq_map));
4916 rx_data->cqe_page_base.hi =
4917 ECORE_CPU_TO_LE32(U64_HI(params->rcq_map));
4918 rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS,
4919 flags);
4920
4921 if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) {
4922 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
4923 rx_data->is_approx_mcast = 1;
4924 }
4925
4926 rx_data->rss_engine_id = params->rss_engine_id;
4927
4928 /* silent vlan removal */
4929 rx_data->silent_vlan_removal_flg =
4930 ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags);
4931 rx_data->silent_vlan_value =
4932 ECORE_CPU_TO_LE16(params->silent_removal_value);
4933 rx_data->silent_vlan_mask =
4934 ECORE_CPU_TO_LE16(params->silent_removal_mask);
4935 }
4936
4937 /* initialize the general, tx and rx parts of a queue object */
4938 static void ecore_q_fill_setup_data_cmn(struct bxe_softc *sc,
4939 struct ecore_queue_state_params *cmd_params,
4940 struct client_init_ramrod_data *data)
4941 {
4942 ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
4943 &cmd_params->params.setup.gen_params,
4944 &data->general,
4945 &cmd_params->params.setup.flags);
4946
4947 ecore_q_fill_init_tx_data(cmd_params->q_obj,
4948 &cmd_params->params.setup.txq_params,
4949 &data->tx,
4950 &cmd_params->params.setup.flags);
4951
4952 ecore_q_fill_init_rx_data(cmd_params->q_obj,
4953 &cmd_params->params.setup.rxq_params,
4954 &data->rx,
4955 &cmd_params->params.setup.flags);
4956
4957 ecore_q_fill_init_pause_data(cmd_params->q_obj,
4958 &cmd_params->params.setup.pause_params,
4959 &data->rx);
4960 }
4961
4962 /* initialize the general and tx parts of a tx-only queue object */
4963 static void ecore_q_fill_setup_tx_only(struct bxe_softc *sc,
4964 struct ecore_queue_state_params *cmd_params,
4965 struct tx_queue_init_ramrod_data *data)
4966 {
4967 ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
4968 &cmd_params->params.tx_only.gen_params,
4969 &data->general,
4970 &cmd_params->params.tx_only.flags);
4971
4972 ecore_q_fill_init_tx_data(cmd_params->q_obj,
4973 &cmd_params->params.tx_only.txq_params,
4974 &data->tx,
4975 &cmd_params->params.tx_only.flags);
4976
4977 ECORE_MSG(sc, "cid %d, tx bd page lo %x hi %x",
4978 cmd_params->q_obj->cids[0],
4979 data->tx.tx_bd_page_base.lo,
4980 data->tx.tx_bd_page_base.hi);
4981 }
4982
4983 /**
4984 * ecore_q_init - init HW/FW queue
4985 *
4986 * @sc: device handle
4987 * @params:
4988 *
4989 * HW/FW initial Queue configuration:
4990 * - HC: Rx and Tx
4991 * - CDU context validation
4992 *
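 * No ramrod is sent for this command: the status-block coalescing and the
 * CDU context validation are programmed directly and the command is
 * completed right away.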
4993 */
4994 static inline int ecore_q_init(struct bxe_softc *sc,
4995 struct ecore_queue_state_params *params)
4996 {
4997 struct ecore_queue_sp_obj *o = params->q_obj;
4998 struct ecore_queue_init_params *init = &params->params.init;
4999 uint16_t hc_usec;
5000 uint8_t cos;
5001
5002 /* Tx HC configuration */
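/* hc_rate is given in interrupts per second; convert it to a
 * host-coalescing interval in usec (a zero rate maps to a zero interval).
 */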
5003 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) &&
5004 ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) {
5005 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
5006
5007 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id,
5008 init->tx.sb_cq_index,
5009 !ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->tx.flags),
5010 hc_usec);
5011 }
5012
5013 /* Rx HC configuration */
5014 if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) &&
5015 ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) {
5016 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
5017
5018 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id,
5019 init->rx.sb_cq_index,
5020 !ECORE_TEST_BIT(ECORE_Q_FLG_HC_EN, &init->rx.flags),
5021 hc_usec);
5022 }
5023
5024 /* Set CDU context validation values */
5025 for (cos = 0; cos < o->max_cos; cos++) {
5026 ECORE_MSG(sc, "setting context validation. cid %d, cos %d\n",
5027 o->cids[cos], cos);
5028 ECORE_MSG(sc, "context pointer %p\n", init->cxts[cos]);
5029 ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
5030 }
5031
5032 /* As no ramrod is sent, complete the command immediately */
5033 o->complete_cmd(sc, o, ECORE_Q_CMD_INIT);
5034
5035 ECORE_MMIOWB();
5036 ECORE_SMP_MB();
5037
5038 return ECORE_SUCCESS;
5039 }
5040
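/* The E1x and E2 SETUP handlers below differ only in that the E2 flavor
 * also fills the E2-specific portion of the ramrod data via
 * ecore_q_fill_setup_data_e2().
 */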
5041 static inline int ecore_q_send_setup_e1x(struct bxe_softc *sc,
5042 struct ecore_queue_state_params *params)
5043 {
5044 struct ecore_queue_sp_obj *o = params->q_obj;
5045 struct client_init_ramrod_data *rdata =
5046 (struct client_init_ramrod_data *)o->rdata;
5047 ecore_dma_addr_t data_mapping = o->rdata_mapping;
5048 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
5049
5050 /* Clear the ramrod data */
5051 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5052
5053 /* Fill the ramrod data */
5054 ecore_q_fill_setup_data_cmn(sc, params, rdata);
5055
5056 /* No need for an explicit memory barrier here as long as we
5057 * ensure the ordering of writing to the SPQ element
5058 * and updating of the SPQ producer which involves a memory
5059 * read. If the memory read is removed we will have to put a
5060 * full memory barrier there (inside ecore_sp_post()).
5061 */
5062 return ecore_sp_post(sc,
5063 ramrod,
5064 o->cids[ECORE_PRIMARY_CID_INDEX],
5065 data_mapping,
5066 ETH_CONNECTION_TYPE);
5067 }
5068
5069 static inline int ecore_q_send_setup_e2(struct bxe_softc *sc,
5070 struct ecore_queue_state_params *params)
5071 {
5072 struct ecore_queue_sp_obj *o = params->q_obj;
5073 struct client_init_ramrod_data *rdata =
5074 (struct client_init_ramrod_data *)o->rdata;
5075 ecore_dma_addr_t data_mapping = o->rdata_mapping;
5076 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
5077
5078 /* Clear the ramrod data */
5079 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5080
5081 /* Fill the ramrod data */
5082 ecore_q_fill_setup_data_cmn(sc, params, rdata);
5083 ecore_q_fill_setup_data_e2(sc, params, rdata);
5084
5085 /* No need for an explicit memory barrier here as long as we
5086 * ensure the ordering of writing to the SPQ element
5087 * and updating of the SPQ producer which involves a memory
5088 * read. If the memory read is removed we will have to put a
5089 * full memory barrier there (inside ecore_sp_post()).
5090 */
5091 return ecore_sp_post(sc,
5092 ramrod,
5093 o->cids[ECORE_PRIMARY_CID_INDEX],
5094 data_mapping,
5095 ETH_CONNECTION_TYPE);
5096 }
5097
5098 static inline int ecore_q_send_setup_tx_only(struct bxe_softc *sc,
5099 struct ecore_queue_state_params *params)
5100 {
5101 struct ecore_queue_sp_obj *o = params->q_obj;
5102 struct tx_queue_init_ramrod_data *rdata =
5103 (struct tx_queue_init_ramrod_data *)o->rdata;
5104 ecore_dma_addr_t data_mapping = o->rdata_mapping;
5105 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
5106 struct ecore_queue_setup_tx_only_params *tx_only_params =
5107 &params->params.tx_only;
5108 uint8_t cid_index = tx_only_params->cid_index;
5109
5110 if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type)) {
5111 ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
5112 ECORE_MSG(sc, "sending forward tx-only ramrod");
}
5113
5114 if (cid_index >= o->max_cos) {
5115 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5116 o->cl_id, cid_index);
5117 return ECORE_INVAL;
5118 }
5119
5120 ECORE_MSG(sc, "parameters received: cos: %d sp-id: %d\n",
5121 tx_only_params->gen_params.cos,
5122 tx_only_params->gen_params.spcl_id);
5123
5124 /* Clear the ramrod data */
5125 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5126
5127 /* Fill the ramrod data */
5128 ecore_q_fill_setup_tx_only(sc, params, rdata);
5129
5130 ECORE_MSG(sc, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
5131 o->cids[cid_index], rdata->general.client_id,
5132 rdata->general.sp_client_id, rdata->general.cos);
5133
5134 /* No need for an explicit memory barrier here as long as we
5135 * ensure the ordering of writing to the SPQ element
5136 * and updating of the SPQ producer which involves a memory
5137 * read. If the memory read is removed we will have to put a
5138 * full memory barrier there (inside ecore_sp_post()).
5139 */
5140 return ecore_sp_post(sc, ramrod, o->cids[cid_index],
5141 data_mapping, ETH_CONNECTION_TYPE);
5142 }
5143
5144 static void ecore_q_fill_update_data(struct bxe_softc *sc,
5145 struct ecore_queue_sp_obj *obj,
5146 struct ecore_queue_update_params *params,
5147 struct client_update_ramrod_data *data)
5148 {
5149 /* Client ID of the client to update */
5150 data->client_id = obj->cl_id;
5151
5152 /* Function ID of the client to update */
5153 data->func_id = obj->func_id;
5154
5155 /* Default VLAN value */
5156 data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan);
5157
5158 /* Inner VLAN stripping */
5159 data->inner_vlan_removal_enable_flg =
5160 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM,
5161 &params->update_flags);
5162 data->inner_vlan_removal_change_flg =
5163 ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
5164 &params->update_flags);
5165
5166 /* Outer VLAN stripping */
5167 data->outer_vlan_removal_enable_flg =
5168 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM,
5169 &params->update_flags);
5170 data->outer_vlan_removal_change_flg =
5171 ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
5172 &params->update_flags);
5173
5174 /* Drop packets that have source MAC that doesn't belong to this
5175 * Queue.
5176 */
5177 data->anti_spoofing_enable_flg =
5178 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF,
5179 &params->update_flags);
5180 data->anti_spoofing_change_flg =
5181 ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
5182 &params->update_flags);
5183
5184 /* Activate/Deactivate */
5185 data->activate_flg =
5186 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags);
5187 data->activate_change_flg =
5188 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5189 &params->update_flags);
5190
5191 /* Enable default VLAN */
5192 data->default_vlan_enable_flg =
5193 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN,
5194 &params->update_flags);
5195 data->default_vlan_change_flg =
5196 ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
5197 &params->update_flags);
5198
5199 /* silent vlan removal */
5200 data->silent_vlan_change_flg =
5201 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5202 &params->update_flags);
5203 data->silent_vlan_removal_flg =
5204 ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM,
5205 &params->update_flags);
5206 data->silent_vlan_value = ECORE_CPU_TO_LE16(params->silent_removal_value);
5207 data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask);
5208
5209 /* tx switching */
5210 data->tx_switching_flg =
5211 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING,
5212 &params->update_flags);
5213 data->tx_switching_change_flg =
5214 ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
5215 &params->update_flags);
5216
5217 /* PTP */
5218 data->handle_ptp_pkts_flg =
5219 ECORE_TEST_BIT(ECORE_Q_UPDATE_PTP_PKTS,
5220 &params->update_flags);
5221 data->handle_ptp_pkts_change_flg =
5222 ECORE_TEST_BIT(ECORE_Q_UPDATE_PTP_PKTS_CHNG,
5223 &params->update_flags);
5224 }
5225
5226 static inline int ecore_q_send_update(struct bxe_softc *sc,
5227 struct ecore_queue_state_params *params)
5228 {
5229 struct ecore_queue_sp_obj *o = params->q_obj;
5230 struct client_update_ramrod_data *rdata =
5231 (struct client_update_ramrod_data *)o->rdata;
5232 ecore_dma_addr_t data_mapping = o->rdata_mapping;
5233 struct ecore_queue_update_params *update_params =
5234 &params->params.update;
5235 uint8_t cid_index = update_params->cid_index;
5236
5237 if (cid_index >= o->max_cos) {
5238 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5239 o->cl_id, cid_index);
5240 return ECORE_INVAL;
5241 }
5242
5243 /* Clear the ramrod data */
5244 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5245
5246 /* Fill the ramrod data */
5247 ecore_q_fill_update_data(sc, o, update_params, rdata);
5248
5249 /* No need for an explicit memory barrier here as long as we
5250 * ensure the ordering of writing to the SPQ element
5251 * and updating of the SPQ producer which involves a memory
5252 * read. If the memory read is removed we will have to put a
5253 * full memory barrier there (inside ecore_sp_post()).
5254 */
5255 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
5256 o->cids[cid_index], data_mapping,
5257 ETH_CONNECTION_TYPE);
5258 }
5259
5260 /**
5261 * ecore_q_send_deactivate - send DEACTIVATE command
5262 *
5263 * @sc: device handle
5264 * @params:
5265 *
5266 * implemented using the UPDATE command.
5267 */
5268 static inline int ecore_q_send_deactivate(struct bxe_softc *sc,
5269 struct ecore_queue_state_params *params)
5270 {
5271 struct ecore_queue_update_params *update = &params->params.update;
5272
5273 ECORE_MEMSET(update, 0, sizeof(*update));
5274
5275 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5276
5277 return ecore_q_send_update(sc, params);
5278 }
5279
5280 /**
5281 * ecore_q_send_activate - send ACTIVATE command
5282 *
5283 * @sc: device handle
5284 * @params:
5285 *
5286 * implemented using the UPDATE command.
5287 */
5288 static inline int ecore_q_send_activate(struct bxe_softc *sc,
5289 struct ecore_queue_state_params *params)
5290 {
5291 struct ecore_queue_update_params *update = &params->params.update;
5292
5293 ECORE_MEMSET(update, 0, sizeof(*update));
5294
5295 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
5296 ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
5297
5298 return ecore_q_send_update(sc, params);
5299 }
5300
5301 static void ecore_q_fill_update_tpa_data(struct bxe_softc *sc,
5302 struct ecore_queue_sp_obj *obj,
5303 struct ecore_queue_update_tpa_params *params,
5304 struct tpa_update_ramrod_data *data)
5305 {
5306 data->client_id = obj->cl_id;
5307 data->complete_on_both_clients = params->complete_on_both_clients;
5308 data->dont_verify_rings_pause_thr_flg =
5309 params->dont_verify_thr;
5310 data->max_agg_size = ECORE_CPU_TO_LE16(params->max_agg_sz);
5311 data->max_sges_for_packet = params->max_sges_pkt;
5312 data->max_tpa_queues = params->max_tpa_queues;
5313 data->sge_buff_size = ECORE_CPU_TO_LE16(params->sge_buff_sz);
5314 data->sge_page_base_hi = ECORE_CPU_TO_LE32(U64_HI(params->sge_map));
5315 data->sge_page_base_lo = ECORE_CPU_TO_LE32(U64_LO(params->sge_map));
5316 data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_pause_thr_high);
5317 data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_pause_thr_low);
5318 data->tpa_mode = params->tpa_mode;
5319 data->update_ipv4 = params->update_ipv4;
5320 data->update_ipv6 = params->update_ipv6;
5321 }
5322
5323 static inline int ecore_q_send_update_tpa(struct bxe_softc *sc,
5324 struct ecore_queue_state_params *params)
5325 {
5326 struct ecore_queue_sp_obj *o = params->q_obj;
5327 struct tpa_update_ramrod_data *rdata =
5328 (struct tpa_update_ramrod_data *)o->rdata;
5329 ecore_dma_addr_t data_mapping = o->rdata_mapping;
5330 struct ecore_queue_update_tpa_params *update_tpa_params =
5331 &params->params.update_tpa;
5332 uint16_t type;
5333
5334 /* Clear the ramrod data */
5335 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5336
5337 /* Fill the ramrod data */
5338 ecore_q_fill_update_tpa_data(sc, o, update_tpa_params, rdata);
5339
5340 /* Add the function id inside the type, so that sp post function
5341 * doesn't automatically add the PF func-id, this is required
5342 * for operations done by PFs on behalf of their VFs
5343 */
5344 type = ETH_CONNECTION_TYPE |
5345 ((o->func_id) << SPE_HDR_T_FUNCTION_ID_SHIFT);
5346
5347 /* No need for an explicit memory barrier here as long as we
5348 * ensure the ordering of writing to the SPQ element
5349 * and updating of the SPQ producer which involves a memory
5350 * read. If the memory read is removed we will have to put a
5351 * full memory barrier there (inside ecore_sp_post()).
5352 */
5353 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TPA_UPDATE,
5354 o->cids[ECORE_PRIMARY_CID_INDEX],
5355 data_mapping, type);
5356 }
5357
5358 static inline int ecore_q_send_halt(struct bxe_softc *sc,
5359 struct ecore_queue_state_params *params)
5360 {
5361 struct ecore_queue_sp_obj *o = params->q_obj;
5362
5363 /* build eth_halt_ramrod_data.client_id in a big-endian friendly way */
5364 ecore_dma_addr_t data_mapping = 0;
5365 data_mapping = (ecore_dma_addr_t)o->cl_id;
5366
5367 /* No need for an explicit memory barrier here as long as we
5368 * ensure the ordering of writing to the SPQ element
5369 * and updating of the SPQ producer which involves a memory
5370 * read. If the memory read is removed we will have to put a
5371 * full memory barrier there (inside ecore_sp_post()).
5372 */
5373 return ecore_sp_post(sc,
5374 RAMROD_CMD_ID_ETH_HALT,
5375 o->cids[ECORE_PRIMARY_CID_INDEX],
5376 data_mapping,
5377 ETH_CONNECTION_TYPE);
5378 }
5379
5380 static inline int ecore_q_send_cfc_del(struct bxe_softc *sc,
5381 struct ecore_queue_state_params *params)
5382 {
5383 struct ecore_queue_sp_obj *o = params->q_obj;
5384 uint8_t cid_idx = params->params.cfc_del.cid_index;
5385
5386 if (cid_idx >= o->max_cos) {
5387 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5388 o->cl_id, cid_idx);
5389 return ECORE_INVAL;
5390 }
5391
5392 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL,
5393 o->cids[cid_idx], 0,
5394 NONE_CONNECTION_TYPE);
5395 }
5396
5397 static inline int ecore_q_send_terminate(struct bxe_softc *sc,
5398 struct ecore_queue_state_params *params)
5399 {
5400 struct ecore_queue_sp_obj *o = params->q_obj;
5401 uint8_t cid_index = params->params.terminate.cid_index;
5402
5403 if (cid_index >= o->max_cos) {
5404 ECORE_ERR("queue[%d]: cid_index (%d) is out of range\n",
5405 o->cl_id, cid_index);
5406 return ECORE_INVAL;
5407 }
5408
5409 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE,
5410 o->cids[cid_index], 0,
5411 ETH_CONNECTION_TYPE);
5412 }
5413
5414 static inline int ecore_q_send_empty(struct bxe_softc *sc,
5415 struct ecore_queue_state_params *params)
5416 {
5417 struct ecore_queue_sp_obj *o = params->q_obj;
5418
5419 return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY,
5420 o->cids[ECORE_PRIMARY_CID_INDEX], 0,
5421 ETH_CONNECTION_TYPE);
5422 }
5423
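/* Dispatch a queue state-machine command that is handled the same way on
 * all chip generations; the chip-specific SETUP command is handled by
 * ecore_queue_send_cmd_e1x()/ecore_queue_send_cmd_e2() below.
 */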
5424 static inline int ecore_queue_send_cmd_cmn(struct bxe_softc *sc,
5425 struct ecore_queue_state_params *params)
5426 {
5427 switch (params->cmd) {
5428 case ECORE_Q_CMD_INIT:
5429 return ecore_q_init(sc, params);
5430 case ECORE_Q_CMD_SETUP_TX_ONLY:
5431 return ecore_q_send_setup_tx_only(sc, params);
5432 case ECORE_Q_CMD_DEACTIVATE:
5433 return ecore_q_send_deactivate(sc, params);
5434 case ECORE_Q_CMD_ACTIVATE:
5435 return ecore_q_send_activate(sc, params);
5436 case ECORE_Q_CMD_UPDATE:
5437 return ecore_q_send_update(sc, params);
5438 case ECORE_Q_CMD_UPDATE_TPA:
5439 return ecore_q_send_update_tpa(sc, params);
5440 case ECORE_Q_CMD_HALT:
5441 return ecore_q_send_halt(sc, params);
5442 case ECORE_Q_CMD_CFC_DEL:
5443 return ecore_q_send_cfc_del(sc, params);
5444 case ECORE_Q_CMD_TERMINATE:
5445 return ecore_q_send_terminate(sc, params);
5446 case ECORE_Q_CMD_EMPTY:
5447 return ecore_q_send_empty(sc, params);
5448 default:
5449 ECORE_ERR("Unknown command: %d\n", params->cmd);
5450 return ECORE_INVAL;
5451 }
5452 }
5453
5454 static int ecore_queue_send_cmd_e1x(struct bxe_softc *sc,
5455 struct ecore_queue_state_params *params)
5456 {
5457 switch (params->cmd) {
5458 case ECORE_Q_CMD_SETUP:
5459 return ecore_q_send_setup_e1x(sc, params);
5460 case ECORE_Q_CMD_INIT:
5461 case ECORE_Q_CMD_SETUP_TX_ONLY:
5462 case ECORE_Q_CMD_DEACTIVATE:
5463 case ECORE_Q_CMD_ACTIVATE:
5464 case ECORE_Q_CMD_UPDATE:
5465 case ECORE_Q_CMD_UPDATE_TPA:
5466 case ECORE_Q_CMD_HALT:
5467 case ECORE_Q_CMD_CFC_DEL:
5468 case ECORE_Q_CMD_TERMINATE:
5469 case ECORE_Q_CMD_EMPTY:
5470 return ecore_queue_send_cmd_cmn(sc, params);
5471 default:
5472 ECORE_ERR("Unknown command: %d\n", params->cmd);
5473 return ECORE_INVAL;
5474 }
5475 }
5476
5477 static int ecore_queue_send_cmd_e2(struct bxe_softc *sc,
5478 struct ecore_queue_state_params *params)
5479 {
5480 switch (params->cmd) {
5481 case ECORE_Q_CMD_SETUP:
5482 return ecore_q_send_setup_e2(sc, params);
5483 case ECORE_Q_CMD_INIT:
5484 case ECORE_Q_CMD_SETUP_TX_ONLY:
5485 case ECORE_Q_CMD_DEACTIVATE:
5486 case ECORE_Q_CMD_ACTIVATE:
5487 case ECORE_Q_CMD_UPDATE:
5488 case ECORE_Q_CMD_UPDATE_TPA:
5489 case ECORE_Q_CMD_HALT:
5490 case ECORE_Q_CMD_CFC_DEL:
5491 case ECORE_Q_CMD_TERMINATE:
5492 case ECORE_Q_CMD_EMPTY:
5493 return ecore_queue_send_cmd_cmn(sc, params);
5494 default:
5495 ECORE_ERR("Unknown command: %d\n", params->cmd);
5496 return ECORE_INVAL;
5497 }
5498 }
5499
5500 /**
5501 * ecore_queue_chk_transition - check state machine of a regular Queue
5502 *
5503 * @sc: device handle
5504 * @o:
5505 * @params:
5506 *
5507 * (not Forwarding)
5508 * It both checks if the requested command is legal in a current
5509 * state and, if it's legal, sets a `next_state' in the object
5510 * that will be used in the completion flow to set the `state'
5511 * of the object.
5512 *
5513 * returns 0 if a requested command is a legal transition,
5514 * ECORE_INVAL otherwise.
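 *
 * A rough sketch of the regular Queue state machine implemented below:
 * RESET -> INITIALIZED -> ACTIVE <-> INACTIVE -> STOPPED -> TERMINATED -> RESET,
 * with MULTI_COS/MCOS_TERMINATED entered while tx-only queues are added
 * and torn down.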
5515 */
5516 static int ecore_queue_chk_transition(struct bxe_softc *sc,
5517 struct ecore_queue_sp_obj *o,
5518 struct ecore_queue_state_params *params)
5519 {
5520 enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
5521 enum ecore_queue_cmd cmd = params->cmd;
5522 struct ecore_queue_update_params *update_params =
5523 &params->params.update;
5524 uint8_t next_tx_only = o->num_tx_only;
5525
5526 /* Forget all commands pending completion if a driver-only state
5527 * transition has been requested.
5528 */
5529 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5530 o->pending = 0;
5531 o->next_state = ECORE_Q_STATE_MAX;
5532 }
5533
5534 /* Don't allow a next state transition if we are in the middle of
5535 * the previous one.
5536 */
5537 if (o->pending) {
5538 ECORE_ERR("Blocking transition since pending was %lx\n",
5539 o->pending);
5540 return ECORE_BUSY;
5541 }
5542
5543 switch (state) {
5544 case ECORE_Q_STATE_RESET:
5545 if (cmd == ECORE_Q_CMD_INIT)
5546 next_state = ECORE_Q_STATE_INITIALIZED;
5547
5548 break;
5549 case ECORE_Q_STATE_INITIALIZED:
5550 if (cmd == ECORE_Q_CMD_SETUP) {
5551 if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
5552 &params->params.setup.flags))
5553 next_state = ECORE_Q_STATE_ACTIVE;
5554 else
5555 next_state = ECORE_Q_STATE_INACTIVE;
5556 }
5557
5558 break;
5559 case ECORE_Q_STATE_ACTIVE:
5560 if (cmd == ECORE_Q_CMD_DEACTIVATE)
5561 next_state = ECORE_Q_STATE_INACTIVE;
5562
5563 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
5564 (cmd == ECORE_Q_CMD_UPDATE_TPA))
5565 next_state = ECORE_Q_STATE_ACTIVE;
5566
5567 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5568 next_state = ECORE_Q_STATE_MULTI_COS;
5569 next_tx_only = 1;
5570 }
5571
5572 else if (cmd == ECORE_Q_CMD_HALT)
5573 next_state = ECORE_Q_STATE_STOPPED;
5574
5575 else if (cmd == ECORE_Q_CMD_UPDATE) {
5576 /* If "active" state change is requested, update the
5577 * state accordingly.
5578 */
5579 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5580 &update_params->update_flags) &&
5581 !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
5582 &update_params->update_flags))
5583 next_state = ECORE_Q_STATE_INACTIVE;
5584 else
5585 next_state = ECORE_Q_STATE_ACTIVE;
5586 }
5587
5588 break;
5589 case ECORE_Q_STATE_MULTI_COS:
5590 if (cmd == ECORE_Q_CMD_TERMINATE)
5591 next_state = ECORE_Q_STATE_MCOS_TERMINATED;
5592
5593 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5594 next_state = ECORE_Q_STATE_MULTI_COS;
5595 next_tx_only = o->num_tx_only + 1;
5596 }
5597
5598 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
5599 (cmd == ECORE_Q_CMD_UPDATE_TPA))
5600 next_state = ECORE_Q_STATE_MULTI_COS;
5601
5602 else if (cmd == ECORE_Q_CMD_UPDATE) {
5603 /* If "active" state change is requested, update the
5604 * state accordingly.
5605 */
5606 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5607 &update_params->update_flags) &&
5608 !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
5609 &update_params->update_flags))
5610 next_state = ECORE_Q_STATE_INACTIVE;
5611 else
5612 next_state = ECORE_Q_STATE_MULTI_COS;
5613 }
5614
5615 break;
5616 case ECORE_Q_STATE_MCOS_TERMINATED:
5617 if (cmd == ECORE_Q_CMD_CFC_DEL) {
5618 next_tx_only = o->num_tx_only - 1;
5619 if (next_tx_only == 0)
5620 next_state = ECORE_Q_STATE_ACTIVE;
5621 else
5622 next_state = ECORE_Q_STATE_MULTI_COS;
5623 }
5624
5625 break;
5626 case ECORE_Q_STATE_INACTIVE:
5627 if (cmd == ECORE_Q_CMD_ACTIVATE)
5628 next_state = ECORE_Q_STATE_ACTIVE;
5629
5630 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
5631 (cmd == ECORE_Q_CMD_UPDATE_TPA))
5632 next_state = ECORE_Q_STATE_INACTIVE;
5633
5634 else if (cmd == ECORE_Q_CMD_HALT)
5635 next_state = ECORE_Q_STATE_STOPPED;
5636
5637 else if (cmd == ECORE_Q_CMD_UPDATE) {
5638 /* If "active" state change is requested, update the
5639 * state accordingly.
5640 */
5641 if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
5642 &update_params->update_flags) &&
5643 ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
5644 &update_params->update_flags)) {
5645 if (o->num_tx_only == 0)
5646 next_state = ECORE_Q_STATE_ACTIVE;
5647 else /* tx only queues exist for this queue */
5648 next_state = ECORE_Q_STATE_MULTI_COS;
5649 } else
5650 next_state = ECORE_Q_STATE_INACTIVE;
5651 }
5652
5653 break;
5654 case ECORE_Q_STATE_STOPPED:
5655 if (cmd == ECORE_Q_CMD_TERMINATE)
5656 next_state = ECORE_Q_STATE_TERMINATED;
5657
5658 break;
5659 case ECORE_Q_STATE_TERMINATED:
5660 if (cmd == ECORE_Q_CMD_CFC_DEL)
5661 next_state = ECORE_Q_STATE_RESET;
5662
5663 break;
5664 default:
5665 ECORE_ERR("Illegal state: %d\n", state);
5666 }
5667
5668 /* Transition is assured */
5669 if (next_state != ECORE_Q_STATE_MAX) {
5670 ECORE_MSG(sc, "Good state transition: %d(%d)->%d\n",
5671 state, cmd, next_state);
5672 o->next_state = next_state;
5673 o->next_tx_only = next_tx_only;
5674 return ECORE_SUCCESS;
5675 }
5676
5677 ECORE_MSG(sc, "Bad state transition request: %d %d\n", state, cmd);
5678
5679 return ECORE_INVAL;
5680 }
5681
5682 /**
5683 * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
5684 *
5685 * @sc: device handle
5686 * @o:
5687 * @params:
5688 *
5689 * It both checks if the requested command is legal in a current
5690 * state and, if it's legal, sets a `next_state' in the object
5691 * that will be used in the completion flow to set the `state'
5692 * of the object.
5693 *
5694 * returns 0 if a requested command is a legal transition,
5695 * ECORE_INVAL otherwise.
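 *
 * The Forwarding Queue only walks RESET -> INITIALIZED -> ACTIVE/INACTIVE
 * -> RESET, driven by the INIT, SETUP_TX_ONLY and CFC_DEL commands.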
5696 */
5697 static int ecore_queue_chk_fwd_transition(struct bxe_softc *sc,
5698 struct ecore_queue_sp_obj *o,
5699 struct ecore_queue_state_params *params)
5700 {
5701 enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
5702 enum ecore_queue_cmd cmd = params->cmd;
5703
5704 switch (state) {
5705 case ECORE_Q_STATE_RESET:
5706 if (cmd == ECORE_Q_CMD_INIT)
5707 next_state = ECORE_Q_STATE_INITIALIZED;
5708
5709 break;
5710 case ECORE_Q_STATE_INITIALIZED:
5711 if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
5712 if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
5713 &params->params.tx_only.flags))
5714 next_state = ECORE_Q_STATE_ACTIVE;
5715 else
5716 next_state = ECORE_Q_STATE_INACTIVE;
5717 }
5718
5719 break;
5720 case ECORE_Q_STATE_ACTIVE:
5721 case ECORE_Q_STATE_INACTIVE:
5722 if (cmd == ECORE_Q_CMD_CFC_DEL)
5723 next_state = ECORE_Q_STATE_RESET;
5724
5725 break;
5726 default:
5727 ECORE_ERR("Illegal state: %d\n", state);
5728 }
5729
5730 /* Transition is assured */
5731 if (next_state != ECORE_Q_STATE_MAX) {
5732 ECORE_MSG(sc, "Good state transition: %d(%d)->%d\n",
5733 state, cmd, next_state);
5734 o->next_state = next_state;
5735 return ECORE_SUCCESS;
5736 }
5737
5738 ECORE_MSG(sc, "Bad state transition request: %d %d\n", state, cmd);
5739 return ECORE_INVAL;
5740 }
5741
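/**
 * ecore_init_queue_obj - initialize a queue state object
 *
 * @sc: device handle
 * @obj: queue state object to initialize
 * @cl_id: client id this queue belongs to
 * @cids: array of HW connection ids, one per traffic class
 * @cid_cnt: number of entries in @cids (at most ECORE_MULTI_TX_COS)
 * @func_id: function id
 * @rdata: ramrod data buffer
 * @rdata_mapping: DMA address of @rdata
 * @type: queue type flags (ECORE_Q_TYPE_*)
 *
 * Selects the chip-specific send_cmd handler and either the Forwarding or
 * the regular transition checker based on @type.
 */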
5742 void ecore_init_queue_obj(struct bxe_softc *sc,
5743 struct ecore_queue_sp_obj *obj,
5744 uint8_t cl_id, uint32_t *cids, uint8_t cid_cnt, uint8_t func_id,
5745 void *rdata,
5746 ecore_dma_addr_t rdata_mapping, unsigned long type)
5747 {
5748 ECORE_MEMSET(obj, 0, sizeof(*obj));
5749
5750 /* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */
5751 ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt);
5752
5753 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5754 obj->max_cos = cid_cnt;
5755 obj->cl_id = cl_id;
5756 obj->func_id = func_id;
5757 obj->rdata = rdata;
5758 obj->rdata_mapping = rdata_mapping;
5759 obj->type = type;
5760 obj->next_state = ECORE_Q_STATE_MAX;
5761
5762 if (CHIP_IS_E1x(sc))
5763 obj->send_cmd = ecore_queue_send_cmd_e1x;
5764 else
5765 obj->send_cmd = ecore_queue_send_cmd_e2;
5766
5767 if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type))
5768 obj->check_transition = ecore_queue_chk_fwd_transition;
5769 else
5770 obj->check_transition = ecore_queue_chk_transition;
5771
5772 obj->complete_cmd = ecore_queue_comp_cmd;
5773 obj->wait_comp = ecore_queue_wait_comp;
5774 obj->set_pending = ecore_queue_set_pending;
5775 }
5776
5777 /* return a queue object's logical state */
5778 int ecore_get_q_logical_state(struct bxe_softc *sc,
5779 struct ecore_queue_sp_obj *obj)
5780 {
5781 switch (obj->state) {
5782 case ECORE_Q_STATE_ACTIVE:
5783 case ECORE_Q_STATE_MULTI_COS:
5784 return ECORE_Q_LOGICAL_STATE_ACTIVE;
5785 case ECORE_Q_STATE_RESET:
5786 case ECORE_Q_STATE_INITIALIZED:
5787 case ECORE_Q_STATE_MCOS_TERMINATED:
5788 case ECORE_Q_STATE_INACTIVE:
5789 case ECORE_Q_STATE_STOPPED:
5790 case ECORE_Q_STATE_TERMINATED:
5791 case ECORE_Q_STATE_FLRED:
5792 return ECORE_Q_LOGICAL_STATE_STOPPED;
5793 default:
5794 return ECORE_INVAL;
5795 }
5796 }
5797
5798 /********************** Function state object *********************************/
5799 enum ecore_func_state ecore_func_get_state(struct bxe_softc *sc,
5800 struct ecore_func_sp_obj *o)
5801 {
5802 /* in the middle of a transaction - return INVALID state */
5803 if (o->pending)
5804 return ECORE_F_STATE_MAX;
5805
5806 /* ensure the order of reading o->pending and o->state:
5807 * o->pending should be read first
5808 */
5809 rmb();
5810
5811 return o->state;
5812 }
5813
5814 static int ecore_func_wait_comp(struct bxe_softc *sc,
5815 struct ecore_func_sp_obj *o,
5816 enum ecore_func_cmd cmd)
5817 {
5818 return ecore_state_wait(sc, cmd, &o->pending);
5819 }
5820
5821 /**
5822 * ecore_func_state_change_comp - complete the state machine transition
5823 *
5824 * @sc: device handle
5825 * @o:
5826 * @cmd:
5827 *
5828 * Called on state change transition. Completes the state
5829 * machine transition only - no HW interaction.
5830 */
5831 static inline int ecore_func_state_change_comp(struct bxe_softc *sc,
5832 struct ecore_func_sp_obj *o,
5833 enum ecore_func_cmd cmd)
5834 {
5835 unsigned long cur_pending = o->pending;
5836
5837 if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
5838 ECORE_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
5839 cmd, ECORE_FUNC_ID(sc), o->state,
5840 cur_pending, o->next_state);
5841 return ECORE_INVAL;
5842 }
5843
5844 ECORE_MSG(sc,
5845 "Completing command %d for func %d, setting state to %d\n",
5846 cmd, ECORE_FUNC_ID(sc), o->next_state);
5847
5848 o->state = o->next_state;
5849 o->next_state = ECORE_F_STATE_MAX;
5850
5851 /* It's important that o->state and o->next_state are
5852 * updated before o->pending.
5853 */
5854 wmb();
5855
5856 ECORE_CLEAR_BIT(cmd, &o->pending);
5857 ECORE_SMP_MB_AFTER_CLEAR_BIT();
5858
5859 return ECORE_SUCCESS;
5860 }
5861
5862 /**
5863 * ecore_func_comp_cmd - complete the state change command
5864 *
5865 * @sc: device handle
5866 * @o:
5867 * @cmd:
5868 *
5869 * Checks that the arrived completion is expected.
5870 */
5871 static int ecore_func_comp_cmd(struct bxe_softc *sc,
5872 struct ecore_func_sp_obj *o,
5873 enum ecore_func_cmd cmd)
5874 {
5875 /* Complete the state machine part first, check if it's a
5876 * legal completion.
5877 */
5878 int rc = ecore_func_state_change_comp(sc, o, cmd);
5879 return rc;
5880 }
5881
5882 /**
5883 * ecore_func_chk_transition - perform function state machine transition
5884 *
5885 * @sc: device handle
5886 * @o:
5887 * @params:
5888 *
5889 * It both checks if the requested command is legal in a current
5890 * state and, if it's legal, sets a `next_state' in the object
5891 * that will be used in the completion flow to set the `state'
5892 * of the object.
5893 *
5894 * returns 0 if a requested command is a legal transition,
5895 * ECORE_INVAL otherwise.
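 *
 * The function state machine implemented below is, roughly:
 * RESET <-> INITIALIZED <-> STARTED <-> TX_STOPPED; the AFEX, SWITCH_UPDATE
 * and SET_TIMESYNC ramrods are accepted in STARTED (the latter two also in
 * TX_STOPPED) without changing the state.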
5896 */
5897 static int ecore_func_chk_transition(struct bxe_softc *sc,
5898 struct ecore_func_sp_obj *o,
5899 struct ecore_func_state_params *params)
5900 {
5901 enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
5902 enum ecore_func_cmd cmd = params->cmd;
5903
5904 /* Forget all commands pending completion if a driver-only state
5905 * transition has been requested.
5906 */
5907 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5908 o->pending = 0;
5909 o->next_state = ECORE_F_STATE_MAX;
5910 }
5911
5912 /* Don't allow a next state transition if we are in the middle of
5913 * the previous one.
5914 */
5915 if (o->pending)
5916 return ECORE_BUSY;
5917
5918 switch (state) {
5919 case ECORE_F_STATE_RESET:
5920 if (cmd == ECORE_F_CMD_HW_INIT)
5921 next_state = ECORE_F_STATE_INITIALIZED;
5922
5923 break;
5924 case ECORE_F_STATE_INITIALIZED:
5925 if (cmd == ECORE_F_CMD_START)
5926 next_state = ECORE_F_STATE_STARTED;
5927
5928 else if (cmd == ECORE_F_CMD_HW_RESET)
5929 next_state = ECORE_F_STATE_RESET;
5930
5931 break;
5932 case ECORE_F_STATE_STARTED:
5933 if (cmd == ECORE_F_CMD_STOP)
5934 next_state = ECORE_F_STATE_INITIALIZED;
5935 /* afex ramrods can be sent only in the started mode, and only
5936 * if a function_stop ramrod completion is not pending;
5937 * for these events the next state remains STARTED.
5938 */
5939 else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
5940 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5941 next_state = ECORE_F_STATE_STARTED;
5942
5943 else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
5944 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5945 next_state = ECORE_F_STATE_STARTED;
5946
5947 /* Switch_update ramrod can be sent in either started or
5948 * tx_stopped state, and it doesn't change the state.
5949 */
5950 else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
5951 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5952 next_state = ECORE_F_STATE_STARTED;
5953
5954 else if ((cmd == ECORE_F_CMD_SET_TIMESYNC) &&
5955 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5956 next_state = ECORE_F_STATE_STARTED;
5957
5958 else if (cmd == ECORE_F_CMD_TX_STOP)
5959 next_state = ECORE_F_STATE_TX_STOPPED;
5960
5961 break;
5962 case ECORE_F_STATE_TX_STOPPED:
5963 if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
5964 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5965 next_state = ECORE_F_STATE_TX_STOPPED;
5966
5967 else if ((cmd == ECORE_F_CMD_SET_TIMESYNC) &&
5968 (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
5969 next_state = ECORE_F_STATE_TX_STOPPED;
5970
5971 else if (cmd == ECORE_F_CMD_TX_START)
5972 next_state = ECORE_F_STATE_STARTED;
5973
5974 break;
5975 default:
5976 ECORE_ERR("Unknown state: %d\n", state);
5977 }
5978
5979 /* Transition is assured */
5980 if (next_state != ECORE_F_STATE_MAX) {
5981 ECORE_MSG(sc, "Good function state transition: %d(%d)->%d\n",
5982 state, cmd, next_state);
5983 o->next_state = next_state;
5984 return ECORE_SUCCESS;
5985 }
5986
5987 ECORE_MSG(sc, "Bad function state transition request: %d %d\n",
5988 state, cmd);
5989
5990 return ECORE_INVAL;
5991 }
5992
5993 /**
5994 * ecore_func_init_func - performs HW init at function stage
5995 *
5996 * @sc: device handle
5997 * @drv:
5998 *
5999 * Init HW when the current phase is
6000 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
6001 * HW blocks.
6002 */
6003 static inline int ecore_func_init_func(struct bxe_softc *sc,
6004 const struct ecore_func_sp_drv_ops *drv)
6005 {
6006 return drv->init_hw_func(sc);
6007 }
6008
6009 /**
6010 * ecore_func_init_port - performs HW init at port stage
6011 *
6012 * @sc: device handle
6013 * @drv:
6014 *
6015 * Init HW when the current phase is
6016 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
6017 * FUNCTION-only HW blocks.
6018 *
6019 */
6020 static inline int ecore_func_init_port(struct bxe_softc *sc,
6021 const struct ecore_func_sp_drv_ops *drv)
6022 {
6023 int rc = drv->init_hw_port(sc);
6024 if (rc)
6025 return rc;
6026
6027 return ecore_func_init_func(sc, drv);
6028 }
6029
6030 /**
6031 * ecore_func_init_cmn_chip - performs HW init at chip-common stage
6032 *
6033 * @sc: device handle
6034 * @drv:
6035 *
6036 * Init HW when the current phase is
6037 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
6038 * PORT-only and FUNCTION-only HW blocks.
6039 */
6040 static inline int ecore_func_init_cmn_chip(struct bxe_softc *sc,
6041 const struct ecore_func_sp_drv_ops *drv)
6042 {
6043 int rc = drv->init_hw_cmn_chip(sc);
6044 if (rc)
6045 return rc;
6046
6047 return ecore_func_init_port(sc, drv);
6048 }
6049
6050 /**
6051 * ecore_func_init_cmn - performs HW init at common stage
6052 *
6053 * @sc: device handle
6054 * @drv:
6055 *
6056 * Init HW when the current phase is
6057 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
6058 * PORT-only and FUNCTION-only HW blocks.
6059 */
6060 static inline int ecore_func_init_cmn(struct bxe_softc *sc,
6061 const struct ecore_func_sp_drv_ops *drv)
6062 {
6063 int rc = drv->init_hw_cmn(sc);
6064 if (rc)
6065 return rc;
6066
6067 return ecore_func_init_port(sc, drv);
6068 }
6069
6070 static int ecore_func_hw_init(struct bxe_softc *sc,
6071 struct ecore_func_state_params *params)
6072 {
6073 uint32_t load_code = params->params.hw_init.load_phase;
6074 struct ecore_func_sp_obj *o = params->f_obj;
6075 const struct ecore_func_sp_drv_ops *drv = o->drv;
6076 int rc = 0;
6077
6078 ECORE_MSG(sc, "function %d load_code %x\n",
6079 ECORE_ABS_FUNC_ID(sc), load_code);
6080
6081 /* Prepare buffers for unzipping the FW */
6082 rc = drv->gunzip_init(sc);
6083 if (rc)
6084 return rc;
6085
6086 /* Prepare FW */
6087 rc = drv->init_fw(sc);
6088 if (rc) {
6089 ECORE_ERR("Error loading firmware\n");
6090 goto init_err;
6091 }
6092
6093 /* Handle the beginning of the COMMON_XXX phases separately... */
6094 switch (load_code) {
6095 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
6096 rc = ecore_func_init_cmn_chip(sc, drv);
6097 if (rc)
6098 goto init_err;
6099
6100 break;
6101 case FW_MSG_CODE_DRV_LOAD_COMMON:
6102 rc = ecore_func_init_cmn(sc, drv);
6103 if (rc)
6104 goto init_err;
6105
6106 break;
6107 case FW_MSG_CODE_DRV_LOAD_PORT:
6108 rc = ecore_func_init_port(sc, drv);
6109 if (rc)
6110 goto init_err;
6111
6112 break;
6113 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6114 rc = ecore_func_init_func(sc, drv);
6115 if (rc)
6116 goto init_err;
6117
6118 break;
6119 default:
6120 ECORE_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6121 rc = ECORE_INVAL;
6122 }
6123
6124 init_err:
6125 drv->gunzip_end(sc);
6126
6127 /* In case of success, complete the command immediately: no ramrods
6128 * have been sent.
6129 */
6130 if (!rc)
6131 o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT);
6132
6133 return rc;
6134 }
6135
6136 /**
6137 * ecore_func_reset_func - reset HW at function stage
6138 *
6139 * @sc: device handle
6140 * @drv:
6141 *
6142 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
6143 * FUNCTION-only HW blocks.
6144 */
6145 static inline void ecore_func_reset_func(struct bxe_softc *sc,
6146 const struct ecore_func_sp_drv_ops *drv)
6147 {
6148 drv->reset_hw_func(sc);
6149 }
6150
6151 /**
6152 * ecore_func_reset_port - reset HW at port stage
6153 *
6154 * @sc: device handle
6155 * @drv:
6156 *
6157 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
6158 * FUNCTION-only and PORT-only HW blocks.
6159 *
6160 * !!!IMPORTANT!!!
6161 *
6162 * It's important to call reset_port() before reset_func(): the last thing
6163 * reset_func() does is pf_disable(), which disables PGLUE_B and thereby
6164 * makes any further DMAE transactions impossible.
6165 */
6166 static inline void ecore_func_reset_port(struct bxe_softc *sc,
6167 const struct ecore_func_sp_drv_ops *drv)
6168 {
6169 drv->reset_hw_port(sc);
6170 ecore_func_reset_func(sc, drv);
6171 }
6172
6173 /**
6174 * ecore_func_reset_cmn - reset HW at common stage
6175 *
6176 * @sc: device handle
6177 * @drv:
6178 *
6179 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
6180 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
6181 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
6182 */
6183 static inline void ecore_func_reset_cmn(struct bxe_softc *sc,
6184 const struct ecore_func_sp_drv_ops *drv)
6185 {
6186 ecore_func_reset_port(sc, drv);
6187 drv->reset_hw_cmn(sc);
6188 }
6189
6190 static inline int ecore_func_hw_reset(struct bxe_softc *sc,
6191 struct ecore_func_state_params *params)
6192 {
6193 uint32_t reset_phase = params->params.hw_reset.reset_phase;
6194 struct ecore_func_sp_obj *o = params->f_obj;
6195 const struct ecore_func_sp_drv_ops *drv = o->drv;
6196
6197 ECORE_MSG(sc, "function %d reset_phase %x\n", ECORE_ABS_FUNC_ID(sc),
6198 reset_phase);
6199
6200 switch (reset_phase) {
6201 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6202 ecore_func_reset_cmn(sc, drv);
6203 break;
6204 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6205 ecore_func_reset_port(sc, drv);
6206 break;
6207 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6208 ecore_func_reset_func(sc, drv);
6209 break;
6210 default:
6211 ECORE_ERR("Unknown reset_phase (0x%x) from MCP\n",
6212 reset_phase);
6213 break;
6214 }
6215
6216 /* Complete the command immediately: no ramrods have been sent. */
6217 o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET);
6218
6219 return ECORE_SUCCESS;
6220 }
6221
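/* ecore_func_send_start - send the FUNCTION_START ramrod.
 *
 * The ramrod data carries the multi-function mode, the tunnel
 * classification settings and the SD VLAN configuration taken from
 * params->params.start.
 */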
6222 static inline int ecore_func_send_start(struct bxe_softc *sc,
6223 struct ecore_func_state_params *params)
6224 {
6225 struct ecore_func_sp_obj *o = params->f_obj;
6226 struct function_start_data *rdata =
6227 (struct function_start_data *)o->rdata;
6228 ecore_dma_addr_t data_mapping = o->rdata_mapping;
6229 struct ecore_func_start_params *start_params = &params->params.start;
6230
6231 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6232
6233 /* Fill the ramrod data with provided parameters */
6234 rdata->function_mode = (uint8_t)start_params->mf_mode;
6235 rdata->sd_vlan_tag = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
6236 rdata->path_id = ECORE_PATH_ID(sc);
6237 rdata->network_cos_mode = start_params->network_cos_mode;
6238
6239 rdata->vxlan_dst_port = start_params->vxlan_dst_port;
6240 rdata->geneve_dst_port = start_params->geneve_dst_port;
6241 rdata->inner_clss_l2gre = start_params->inner_clss_l2gre;
6242 rdata->inner_clss_l2geneve = start_params->inner_clss_l2geneve;
6243 rdata->inner_clss_vxlan = start_params->inner_clss_vxlan;
6244 rdata->inner_rss = start_params->inner_rss;
6245
6246 rdata->sd_accept_mf_clss_fail = start_params->class_fail;
6247 if (start_params->class_fail_ethtype) {
6248 rdata->sd_accept_mf_clss_fail_match_ethtype = 1;
6249 rdata->sd_accept_mf_clss_fail_ethtype =
6250 ECORE_CPU_TO_LE16(start_params->class_fail_ethtype);
6251 }
6252 rdata->sd_vlan_force_pri_flg = start_params->sd_vlan_force_pri;
6253 rdata->sd_vlan_force_pri_val = start_params->sd_vlan_force_pri_val;
6254
6255 /** @@@TMP - until FW 7.10.7 (which will introduce an HSI change)
6256 * `sd_vlan_eth_type' will replace ethertype in SD mode even if
6257 * it's set to 0; This will probably break SD, so we're setting it
6258 * to ethertype 0x8100 for now.
6259 */
6260 if (start_params->sd_vlan_eth_type)
6261 rdata->sd_vlan_eth_type =
6262 ECORE_CPU_TO_LE16(start_params->sd_vlan_eth_type);
6263 else
6264 rdata->sd_vlan_eth_type =
6265 ECORE_CPU_TO_LE16((uint16_t) 0x8100);
6266
6267 rdata->no_added_tags = start_params->no_added_tags;
6268
6269 rdata->c2s_pri_tt_valid = start_params->c2s_pri_valid;
6270 if (rdata->c2s_pri_tt_valid) {
6271 memcpy(rdata->c2s_pri_trans_table.val,
6272 start_params->c2s_pri,
6273 MAX_VLAN_PRIORITIES);
6274 rdata->c2s_pri_default = start_params->c2s_pri_default;
6275 }
6276
6277 /* No need for an explicit memory barrier here as long as we
6278 * ensure the ordering of writing to the SPQ element
6279 * and updating of the SPQ producer which involves a memory
6280 * read. If the memory read is removed we will have to put a
6281 * full memory barrier there (inside ecore_sp_post()).
6282 */
6283 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
6284 data_mapping, NONE_CONNECTION_TYPE);
6285 }
6286
6287 static inline int ecore_func_send_switch_update(struct bxe_softc *sc,
6288 struct ecore_func_state_params *params)
6289 {
6290 struct ecore_func_sp_obj *o = params->f_obj;
6291 struct function_update_data *rdata =
6292 (struct function_update_data *)o->rdata;
6293 ecore_dma_addr_t data_mapping = o->rdata_mapping;
6294 struct ecore_func_switch_update_params *switch_update_params =
6295 &params->params.switch_update;
6296
6297 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6298
6299 /* Fill the ramrod data with provided parameters */
6300 if (ECORE_TEST_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
6301 &switch_update_params->changes)) {
6302 rdata->tx_switch_suspend_change_flg = 1;
6303 rdata->tx_switch_suspend =
6304 ECORE_TEST_BIT(ECORE_F_UPDATE_TX_SWITCH_SUSPEND,
6305 &switch_update_params->changes);
6306 }
6307
6308 if (ECORE_TEST_BIT(ECORE_F_UPDATE_SD_VLAN_TAG_CHNG,
6309 &switch_update_params->changes)) {
6310 rdata->sd_vlan_tag_change_flg = 1;
6311 rdata->sd_vlan_tag =
6312 ECORE_CPU_TO_LE16(switch_update_params->vlan);
6313 }
6314
6315 if (ECORE_TEST_BIT(ECORE_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG,
6316 &switch_update_params->changes)) {
6317 rdata->sd_vlan_eth_type_change_flg = 1;
6318 rdata->sd_vlan_eth_type =
6319 ECORE_CPU_TO_LE16(switch_update_params->vlan_eth_type);
6320 }
6321
6322 if (ECORE_TEST_BIT(ECORE_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
6323 &switch_update_params->changes)) {
6324 rdata->sd_vlan_force_pri_change_flg = 1;
6325 if (ECORE_TEST_BIT(ECORE_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
6326 &switch_update_params->changes))
6327 rdata->sd_vlan_force_pri_flg = 1;
6328 rdata->sd_vlan_force_pri_flg =
6329 switch_update_params->vlan_force_prio;
6330 }
6331
6332 if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_CFG_CHNG,
6333 &switch_update_params->changes)) {
6334 rdata->update_tunn_cfg_flg = 1;
6335 if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
6336 &switch_update_params->changes))
6337 rdata->inner_clss_l2gre = 1;
6338 if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
6339 &switch_update_params->changes))
6340 rdata->inner_clss_vxlan = 1;
6341 if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
6342 &switch_update_params->changes))
6343 rdata->inner_clss_l2geneve = 1;
6344 if (ECORE_TEST_BIT(ECORE_F_UPDATE_TUNNEL_INNER_RSS,
6345 &switch_update_params->changes))
6346 rdata->inner_rss = 1;
6347
6348 rdata->vxlan_dst_port =
6349 ECORE_CPU_TO_LE16(switch_update_params->vxlan_dst_port);
6350 rdata->geneve_dst_port =
6351 ECORE_CPU_TO_LE16(switch_update_params->geneve_dst_port);
6352 }
6353
6354 rdata->echo = SWITCH_UPDATE;
6355
6356 /* No need for an explicit memory barrier here as long as we
6357 * ensure the ordering of writing to the SPQ element
6358 * and updating of the SPQ producer which involves a memory
6359 * read. If the memory read is removed we will have to put a
6360 * full memory barrier there (inside ecore_sp_post()).
6361 */
6362 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
6363 data_mapping, NONE_CONNECTION_TYPE);
6364 }
6365
6366 static inline int ecore_func_send_afex_update(struct bxe_softc *sc,
6367 struct ecore_func_state_params *params)
6368 {
6369 struct ecore_func_sp_obj *o = params->f_obj;
6370 struct function_update_data *rdata =
6371 (struct function_update_data *)o->afex_rdata;
6372 ecore_dma_addr_t data_mapping = o->afex_rdata_mapping;
6373 struct ecore_func_afex_update_params *afex_update_params =
6374 &params->params.afex_update;
6375
6376 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6377
6378 /* Fill the ramrod data with provided parameters */
6379 rdata->vif_id_change_flg = 1;
6380 rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id);
6381 rdata->afex_default_vlan_change_flg = 1;
6382 rdata->afex_default_vlan =
6383 ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan);
6384 rdata->allowed_priorities_change_flg = 1;
6385 rdata->allowed_priorities = afex_update_params->allowed_priorities;
6386 rdata->echo = AFEX_UPDATE;
6387
6388 /* No need for an explicit memory barrier here as long as we
6389 * ensure the ordering of writing to the SPQ element
6390 * and updating of the SPQ producer which involves a memory
6391 * read. If the memory read is removed we will have to put a
6392 * full memory barrier there (inside ecore_sp_post()).
6393 */
6394 ECORE_MSG(sc,
6395 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
6396 rdata->vif_id,
6397 rdata->afex_default_vlan, rdata->allowed_priorities);
6398
6399 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
6400 data_mapping, NONE_CONNECTION_TYPE);
6401 }
6402
6403 static
6404 inline int ecore_func_send_afex_viflists(struct bxe_softc *sc,
6405 struct ecore_func_state_params *params)
6406 {
6407 struct ecore_func_sp_obj *o = params->f_obj;
6408 struct afex_vif_list_ramrod_data *rdata =
6409 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
6410 struct ecore_func_afex_viflists_params *afex_vif_params =
6411 &params->params.afex_viflists;
6412 uint64_t *p_rdata = (uint64_t *)rdata;
6413
6414 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6415
6416 /* Fill the ramrod data with provided parameters */
6417 rdata->vif_list_index = ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index);
6418 rdata->func_bit_map = afex_vif_params->func_bit_map;
6419 rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
6420 rdata->func_to_clear = afex_vif_params->func_to_clear;
6421
6422 /* send the sub-command type in the echo field */
6423 rdata->echo = afex_vif_params->afex_vif_list_command;
6424
6425 ECORE_MSG(sc, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
6426 rdata->afex_vif_list_command, rdata->vif_list_index,
6427 rdata->func_bit_map, rdata->func_to_clear);
6428
6429 /* No need for an explicit memory barrier here as long as we
6430 * ensure the ordering of writing to the SPQ element
6431 * and updating of the SPQ producer which involves a memory
6432 * read. If the memory read is removed we will have to put a
6433 * full memory barrier there (inside ecore_sp_post()).
6434 */
6435
6436 /* this ramrod sends data directly and not through DMA mapping */
6437 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
6438 *p_rdata, NONE_CONNECTION_TYPE);
6439 }
6440
6441 static inline int ecore_func_send_stop(struct bxe_softc *sc,
6442 struct ecore_func_state_params *params)
6443 {
6444 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
6445 NONE_CONNECTION_TYPE);
6446 }
6447
6448 static inline int ecore_func_send_tx_stop(struct bxe_softc *sc,
6449 struct ecore_func_state_params *params)
6450 {
6451 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
6452 NONE_CONNECTION_TYPE);
6453 }
6454 static inline int ecore_func_send_tx_start(struct bxe_softc *sc,
6455 struct ecore_func_state_params *params)
6456 {
6457 struct ecore_func_sp_obj *o = params->f_obj;
6458 struct flow_control_configuration *rdata =
6459 (struct flow_control_configuration *)o->rdata;
6460 ecore_dma_addr_t data_mapping = o->rdata_mapping;
6461 struct ecore_func_tx_start_params *tx_start_params =
6462 &params->params.tx_start;
6463 int i;
6464
6465 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6466
6467 rdata->dcb_enabled = tx_start_params->dcb_enabled;
6468 rdata->dcb_version = tx_start_params->dcb_version;
6469 rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0;
6470
6471 for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
6472 rdata->traffic_type_to_priority_cos[i] =
6473 tx_start_params->traffic_type_to_priority_cos[i];
6474
6475 for (i = 0; i < MAX_TRAFFIC_TYPES; i++)
6476 rdata->dcb_outer_pri[i] = tx_start_params->dcb_outer_pri[i];
6477
6478 /* No need for an explicit memory barrier here as long as we
6479 * ensure the ordering of writing to the SPQ element
6480 * and updating of the SPQ producer which involves a memory
6481 * read. If the memory read is removed we will have to put a
6482 * full memory barrier there (inside ecore_sp_post()).
6483 */
6484 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
6485 data_mapping, NONE_CONNECTION_TYPE);
6486 }
6487
6488 static inline int ecore_func_send_set_timesync(struct bxe_softc *sc,
6489 struct ecore_func_state_params *params)
6490 {
6491 struct ecore_func_sp_obj *o = params->f_obj;
6492 struct set_timesync_ramrod_data *rdata =
6493 (struct set_timesync_ramrod_data *)o->rdata;
6494 ecore_dma_addr_t data_mapping = o->rdata_mapping;
6495 struct ecore_func_set_timesync_params *set_timesync_params =
6496 &params->params.set_timesync;
6497
6498 ECORE_MEMSET(rdata, 0, sizeof(*rdata));
6499
6500 /* Fill the ramrod data with provided parameters */
6501 rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd;
6502 rdata->offset_cmd = set_timesync_params->offset_cmd;
6503 rdata->add_sub_drift_adjust_value =
6504 set_timesync_params->add_sub_drift_adjust_value;
6505 rdata->drift_adjust_value = set_timesync_params->drift_adjust_value;
6506 rdata->drift_adjust_period = set_timesync_params->drift_adjust_period;
6507 rdata->offset_delta.lo =
6508 ECORE_CPU_TO_LE32(U64_LO(set_timesync_params->offset_delta));
6509 rdata->offset_delta.hi =
6510 ECORE_CPU_TO_LE32(U64_HI(set_timesync_params->offset_delta));
6511
6512 ECORE_MSG(sc, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n",
6513 rdata->drift_adjust_cmd, rdata->offset_cmd,
6514 rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value,
6515 rdata->drift_adjust_period, rdata->offset_delta.lo,
6516 rdata->offset_delta.hi);
6517
6518 return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0,
6519 data_mapping, NONE_CONNECTION_TYPE);
6520 }
6521
6522 static int ecore_func_send_cmd(struct bxe_softc *sc,
6523 struct ecore_func_state_params *params)
6524 {
6525 switch (params->cmd) {
6526 case ECORE_F_CMD_HW_INIT:
6527 return ecore_func_hw_init(sc, params);
6528 case ECORE_F_CMD_START:
6529 return ecore_func_send_start(sc, params);
6530 case ECORE_F_CMD_STOP:
6531 return ecore_func_send_stop(sc, params);
6532 case ECORE_F_CMD_HW_RESET:
6533 return ecore_func_hw_reset(sc, params);
6534 case ECORE_F_CMD_AFEX_UPDATE:
6535 return ecore_func_send_afex_update(sc, params);
6536 case ECORE_F_CMD_AFEX_VIFLISTS:
6537 return ecore_func_send_afex_viflists(sc, params);
6538 case ECORE_F_CMD_TX_STOP:
6539 return ecore_func_send_tx_stop(sc, params);
6540 case ECORE_F_CMD_TX_START:
6541 return ecore_func_send_tx_start(sc, params);
6542 case ECORE_F_CMD_SWITCH_UPDATE:
6543 return ecore_func_send_switch_update(sc, params);
6544 case ECORE_F_CMD_SET_TIMESYNC:
6545 return ecore_func_send_set_timesync(sc, params);
6546 default:
6547 ECORE_ERR("Unknown command: %d\n", params->cmd);
6548 return ECORE_INVAL;
6549 }
6550 }
6551
6552 void ecore_init_func_obj(struct bxe_softc *sc,
6553 struct ecore_func_sp_obj *obj,
6554 void *rdata, ecore_dma_addr_t rdata_mapping,
6555 void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
6556 struct ecore_func_sp_drv_ops *drv_iface)
6557 {
6558 ECORE_MEMSET(obj, 0, sizeof(*obj));
6559
6560 ECORE_MUTEX_INIT(&obj->one_pending_mutex);
6561
6562 obj->rdata = rdata;
6563 obj->rdata_mapping = rdata_mapping;
6564 obj->afex_rdata = afex_rdata;
6565 obj->afex_rdata_mapping = afex_rdata_mapping;
6566 obj->send_cmd = ecore_func_send_cmd;
6567 obj->check_transition = ecore_func_chk_transition;
6568 obj->complete_cmd = ecore_func_comp_cmd;
6569 obj->wait_comp = ecore_func_wait_comp;
6570 obj->drv = drv_iface;
6571 }
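/*
 * Minimal usage sketch (illustrative only): a driver is expected to call
 * ecore_init_func_obj() once at attach time, pointing it at DMA-coherent
 * ramrod data areas and at its ecore_func_sp_drv_ops table.  The
 * BXE_SP()/BXE_SP_MAPPING() accessors and the bxe_func_sp_drv table below
 * are assumptions for the example.
 */
#if 0
	ecore_init_func_obj(sc, &sc->func_obj,
			    BXE_SP(sc, func_rdata),
			    BXE_SP_MAPPING(sc, func_rdata),
			    BXE_SP(sc, func_afex_rdata),
			    BXE_SP_MAPPING(sc, func_afex_rdata),
			    &bxe_func_sp_drv);
#endif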
6572
6573 /**
6574 * ecore_func_state_change - perform Function state change transition
6575 *
6576 * @sc: device handle
6577 * @params: parameters to perform the transaction
6578 *
6579 * returns 0 in case of successfully completed transition,
6580 * negative error code in case of failure, positive
6581 * (EBUSY) value if there is a completion that is
6582 * still pending (possible only if RAMROD_COMP_WAIT is
6583 * not set in params->ramrod_flags for asynchronous
6584 * commands).
6585 */
6586 int ecore_func_state_change(struct bxe_softc *sc,
6587 struct ecore_func_state_params *params)
6588 {
6589 struct ecore_func_sp_obj *o = params->f_obj;
6590 int rc, cnt = 300;
6591 enum ecore_func_cmd cmd = params->cmd;
6592 unsigned long *pending = &o->pending;
6593
6594 ECORE_MUTEX_LOCK(&o->one_pending_mutex);
6595
6596 /* Check that the requested transition is legal */
6597 rc = o->check_transition(sc, o, params);
6598 if ((rc == ECORE_BUSY) &&
6599 (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
6600 while ((rc == ECORE_BUSY) && (--cnt > 0)) {
6601 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6602 ECORE_MSLEEP(10);
6603 ECORE_MUTEX_LOCK(&o->one_pending_mutex);
6604 rc = o->check_transition(sc, o, params);
6605 }
6606 if (rc == ECORE_BUSY) {
6607 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6608 ECORE_ERR("timeout waiting for previous ramrod completion\n");
6609 return rc;
6610 }
6611 } else if (rc) {
6612 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6613 return rc;
6614 }
6615
6616 /* Set "pending" bit */
6617 ECORE_SET_BIT(cmd, pending);
6618
6619 /* Don't send a command if only driver cleanup was requested */
6620 if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
6621 ecore_func_state_change_comp(sc, o, cmd);
6622 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6623 } else {
6624 /* Send a ramrod */
6625 rc = o->send_cmd(sc, params);
6626
6627 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
6628
6629 if (rc) {
6630 o->next_state = ECORE_F_STATE_MAX;
6631 ECORE_CLEAR_BIT(cmd, pending);
6632 ECORE_SMP_MB_AFTER_CLEAR_BIT();
6633 return rc;
6634 }
6635
6636 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
6637 rc = o->wait_comp(sc, o, cmd);
6638 if (rc)
6639 return rc;
6640
6641 return ECORE_SUCCESS;
6642 }
6643 }
6644
6645 return ECORE_RET_PENDING(cmd, pending);
6646 }
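/*
 * Usage sketch (illustrative only, assuming the softc keeps its function SP
 * object in sc->func_obj): a synchronous caller fills the state params on
 * the stack, sets RAMROD_COMP_WAIT and treats any non-zero return as a
 * failed or timed-out transition.
 */
#if 0
static int
bxe_example_tx_stop(struct bxe_softc *sc)
{
	struct ecore_func_state_params func_params = { NULL };

	func_params.f_obj = &sc->func_obj;
	func_params.cmd = ECORE_F_CMD_TX_STOP;
	ECORE_SET_BIT(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	return ecore_func_state_change(sc, &func_params);
}
#endif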
6647