Lines matching defs:o (uses of the object pointer o across the bnx2x slow-path object code: execution queues, vlan/mac and multicast objects, credit pools, RSS, and the queue/function state machines)
41 * @o: pointer to the object
51 struct bnx2x_exe_queue_obj *o,
60 memset(o, 0, sizeof(*o));
62 INIT_LIST_HEAD(&o->exe_queue);
63 INIT_LIST_HEAD(&o->pending_comp);
65 spin_lock_init(&o->lock);
67 o->exe_chunk_len = exe_len;
68 o->owner = owner;
71 o->validate = validate;
72 o->remove = remove;
73 o->optimize = optimize;
74 o->execute = exec;
75 o->get = get;
88 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
93 spin_lock_bh(&o->lock);
95 list_for_each_entry(elem, &o->exe_queue, link)
98 spin_unlock_bh(&o->lock);
107 * @o: queue
114 struct bnx2x_exe_queue_obj *o,
120 spin_lock_bh(&o->lock);
124 rc = o->optimize(bp, o->owner, elem);
129 rc = o->validate(bp, o->owner, elem);
137 list_add_tail(&elem->link, &o->exe_queue);
139 spin_unlock_bh(&o->lock);
146 spin_unlock_bh(&o->lock);
153 struct bnx2x_exe_queue_obj *o)
157 while (!list_empty(&o->pending_comp)) {
158 elem = list_first_entry(&o->pending_comp,
170 * @o: queue
176 struct bnx2x_exe_queue_obj *o,
190 if (!list_empty(&o->pending_comp)) {
193 __bnx2x_exe_queue_reset_pending(bp, o);
202 while (!list_empty(&o->exe_queue)) {
203 elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
207 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
213 list_add_tail(&spacer.link, &o->pending_comp);
215 list_move_tail(&elem->link, &o->pending_comp);
225 rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
230 list_splice_init(&o->pending_comp, &o->exe_queue);
235 __bnx2x_exe_queue_reset_pending(bp, o);
240 static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
242 bool empty = list_empty(&o->exe_queue);
247 return empty && list_empty(&o->pending_comp);
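
The bnx2x_exe_queue_* matches above wire an execution-queue object with caller-supplied callbacks (validate, remove, optimize, execute, get), validate elements on add, and drain the queue in chunks bounded by exe_chunk_len. A minimal userspace sketch of that callback-table shape, with simplified hypothetical types and none of the driver's locking or pending_comp handling:

#include <stdio.h>
#include <string.h>

struct exeq_elem {
	struct exeq_elem *next;
	int cmd_len;
};

struct exe_queue {
	struct exeq_elem *head;   /* queued elements (LIFO here for brevity) */
	int exe_chunk_len;        /* upper bound on work per step            */
	void *owner;              /* opaque cookie handed back to callbacks  */
	int (*validate)(void *owner, struct exeq_elem *elem);
	int (*execute)(void *owner, struct exeq_elem *elem);
};

/* Mirror of the init pattern: zero the object, then wire the policy. */
static void exe_queue_init(struct exe_queue *q, void *owner, int chunk_len,
			   int (*validate)(void *, struct exeq_elem *),
			   int (*execute)(void *, struct exeq_elem *))
{
	memset(q, 0, sizeof(*q));
	q->owner = owner;
	q->exe_chunk_len = chunk_len;
	q->validate = validate;
	q->execute = execute;
}

/* Enqueue only if the caller's validate() hook accepts the element. */
static int exe_queue_add(struct exe_queue *q, struct exeq_elem *elem)
{
	int rc = q->validate(q->owner, elem);

	if (rc)
		return rc;
	elem->next = q->head;
	q->head = elem;
	return 0;
}

/* Drain queued elements until the per-step chunk budget is spent. */
static void exe_queue_step(struct exe_queue *q)
{
	int budget = q->exe_chunk_len;

	while (q->head && q->head->cmd_len <= budget) {
		struct exeq_elem *elem = q->head;

		q->head = elem->next;
		budget -= elem->cmd_len;
		q->execute(q->owner, elem);
	}
}

static int accept_all(void *owner, struct exeq_elem *elem)
{
	(void)owner; (void)elem;
	return 0;
}

static int print_one(void *owner, struct exeq_elem *elem)
{
	(void)owner;
	printf("executed element, cmd_len=%d\n", elem->cmd_len);
	return 0;
}

int main(void)
{
	struct exe_queue q;
	struct exeq_elem e = { .next = NULL, .cmd_len = 1 };

	exe_queue_init(&q, NULL, 8, accept_all, print_one);
	exe_queue_add(&q, &e);
	exe_queue_step(&q);
	return 0;
}
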
258 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
260 return !!test_bit(o->state, o->pstate);
263 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
266 clear_bit(o->state, o->pstate);
270 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
273 set_bit(o->state, o->pstate);
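
The raw-object helpers above reduce "ramrod pending" to a single bit that is tested, set and cleared in a shared pstate word. A tiny illustrative analogue using C11 atomics in place of the kernel's test_bit/set_bit/clear_bit (names here are invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct raw_like {
	int state;             /* which bit in *pstate belongs to this object */
	atomic_ulong *pstate;  /* word shared by several objects              */
};

static bool raw_check_pending(struct raw_like *o)
{
	return atomic_load(o->pstate) & (1UL << o->state);
}

static void raw_set_pending(struct raw_like *o)
{
	atomic_fetch_or(o->pstate, 1UL << o->state);
}

static void raw_clear_pending(struct raw_like *o)
{
	atomic_fetch_and(o->pstate, ~(1UL << o->state));
}

int main(void)
{
	atomic_ulong pstate = 0;
	struct raw_like o = { .state = 3, .pstate = &pstate };

	raw_set_pending(&o);
	printf("pending=%d\n", raw_check_pending(&o));  /* 1 */
	raw_clear_pending(&o);
	printf("pending=%d\n", raw_check_pending(&o));  /* 0 */
	return 0;
}
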
327 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
329 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
336 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
338 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
345 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
347 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
354 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
356 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
363 static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
365 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
366 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
379 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
381 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
386 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
388 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
393 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
395 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
400 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
402 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
407 static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
409 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
410 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
427 * @o: vlan_mac object
433 struct bnx2x_vlan_mac_obj *o)
435 if (o->head_reader) {
448 * @o: vlan_mac object
454 struct bnx2x_vlan_mac_obj *o)
457 unsigned long ramrod_flags = o->saved_ramrod_flags;
461 o->head_exe_request = false;
462 o->saved_ramrod_flags = 0;
463 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
477 * @o: vlan_mac object
483 struct bnx2x_vlan_mac_obj *o,
486 o->head_exe_request = true;
487 o->saved_ramrod_flags = ramrod_flags;
496 * @o: vlan_mac object
503 struct bnx2x_vlan_mac_obj *o)
508 while (o->head_exe_request) {
510 __bnx2x_vlan_mac_h_exec_pending(bp, o);
519 * @o: vlan_mac object
525 struct bnx2x_vlan_mac_obj *o)
528 o->head_reader++;
530 o->head_reader);
539 * @o: vlan_mac object
544 struct bnx2x_vlan_mac_obj *o)
548 spin_lock_bh(&o->exe_queue.lock);
549 rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
550 spin_unlock_bh(&o->exe_queue.lock);
559 * @o: vlan_mac object
566 struct bnx2x_vlan_mac_obj *o)
568 if (!o->head_reader) {
574 o->head_reader--;
576 o->head_reader);
582 if (!o->head_reader && o->head_exe_request) {
586 __bnx2x_vlan_mac_h_write_unlock(bp, o);
594 * @o: vlan_mac object
601 struct bnx2x_vlan_mac_obj *o)
603 spin_lock_bh(&o->exe_queue.lock);
604 __bnx2x_vlan_mac_h_read_unlock(bp, o);
605 spin_unlock_bh(&o->exe_queue.lock);
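
The head_reader / head_exe_request matches above implement a small hand-rolled reader/writer scheme: readers are counted under the execution-queue lock, a writer that arrives while readers hold the list records its request instead of running it, and the last reader to unlock executes the postponed request. A simplified userspace analogue, with a pthread mutex standing in for the spinlock and invented names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct vlan_mac_like {
	pthread_mutex_t lock;     /* stands in for exe_queue.lock         */
	int head_reader;          /* number of active list readers        */
	bool head_exe_request;    /* writer deferred while readers ran    */
};

static void do_deferred_work(struct vlan_mac_like *o)
{
	printf("executing deferred request\n");
	o->head_exe_request = false;
}

static void read_lock(struct vlan_mac_like *o)
{
	pthread_mutex_lock(&o->lock);
	o->head_reader++;
	pthread_mutex_unlock(&o->lock);
}

static void read_unlock(struct vlan_mac_like *o)
{
	pthread_mutex_lock(&o->lock);
	o->head_reader--;
	/* last reader out runs whatever a writer had to postpone */
	if (!o->head_reader && o->head_exe_request)
		do_deferred_work(o);
	pthread_mutex_unlock(&o->lock);
}

static void try_write(struct vlan_mac_like *o)
{
	pthread_mutex_lock(&o->lock);
	if (o->head_reader)
		o->head_exe_request = true;   /* readers active: record for later */
	else
		do_deferred_work(o);          /* no readers: run immediately      */
	pthread_mutex_unlock(&o->lock);
}

int main(void)
{
	struct vlan_mac_like o = { PTHREAD_MUTEX_INITIALIZER, 0, false };

	read_lock(&o);      /* a reader walks the list           */
	try_write(&o);      /* writer must wait: request pends   */
	read_unlock(&o);    /* last reader executes the request  */
	return 0;
}
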
608 static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
617 read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
622 list_for_each_entry(pos, &o->head, link) {
634 bnx2x_vlan_mac_h_read_unlock(bp, o);
642 struct bnx2x_vlan_mac_obj *o,
653 list_for_each_entry(pos, &o->head, link)
662 struct bnx2x_vlan_mac_obj *o,
669 list_for_each_entry(pos, &o->head, link)
677 struct bnx2x_vlan_mac_obj *o,
685 list_for_each_entry(pos, &o->head, link)
699 struct bnx2x_vlan_mac_obj *o,
706 list_for_each_entry(pos, &o->head, link)
716 struct bnx2x_vlan_mac_obj *o,
723 list_for_each_entry(pos, &o->head, link)
732 struct bnx2x_vlan_mac_obj *o,
740 list_for_each_entry(pos, &o->head, link)
786 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
788 struct bnx2x_raw_obj *raw = &o->raw;
837 * @o: queue for which we want to configure this rule
844 struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
847 struct bnx2x_raw_obj *raw = &o->raw;
854 bnx2x_vlan_mac_get_rx_tx_flag(o);
884 struct bnx2x_vlan_mac_obj *o,
888 struct bnx2x_raw_obj *raw = &o->raw;
928 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
972 * @o: queue
980 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
983 struct bnx2x_raw_obj *r = &o->raw;
993 struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
996 struct bnx2x_raw_obj *r = &o->raw;
1019 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
1023 struct bnx2x_raw_obj *raw = &o->raw;
1025 bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
1027 bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
1039 * @o: bnx2x_vlan_mac_obj
1045 struct bnx2x_vlan_mac_obj *o,
1049 struct bnx2x_raw_obj *raw = &o->raw;
1061 bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
1068 struct bnx2x_vlan_mac_obj *o,
1072 struct bnx2x_raw_obj *raw = &o->raw;
1086 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
1118 struct bnx2x_vlan_mac_obj *o,
1122 struct bnx2x_raw_obj *raw = &o->raw;
1138 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
1178 * @o: bnx2x_vlan_mac_obj
1184 struct bnx2x_vlan_mac_obj *o,
1188 struct bnx2x_raw_obj *raw = &o->raw;
1200 bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
1231 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1234 if (list_empty(&o->head)) {
1241 *ppos = list_first_entry(&o->head,
1250 if (list_is_last(&pos->link, &o->head))
1273 struct bnx2x_exe_queue_obj *o,
1280 list_for_each_entry(pos, &o->exe_queue, link)
1290 struct bnx2x_exe_queue_obj *o,
1297 list_for_each_entry(pos, &o->exe_queue, link)
1307 struct bnx2x_exe_queue_obj *o,
1315 list_for_each_entry(pos, &o->exe_queue, link)
1342 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1343 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1347 rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
1368 o->get_credit(o)))
1390 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1392 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1398 pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1425 o->put_credit(o))) {
1556 * @o: bnx2x_vlan_mac_obj
1560 struct bnx2x_vlan_mac_obj *o)
1563 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1564 struct bnx2x_raw_obj *raw = &o->raw;
1583 struct bnx2x_vlan_mac_obj *o,
1588 spin_lock_bh(&o->exe_queue.lock);
1591 rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
1594 __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
1601 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1603 spin_unlock_bh(&o->exe_queue.lock);
1612 * @o: bnx2x_vlan_mac_obj
1618 struct bnx2x_vlan_mac_obj *o,
1622 struct bnx2x_raw_obj *r = &o->raw;
1628 spin_lock_bh(&o->exe_queue.lock);
1631 __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1636 spin_unlock_bh(&o->exe_queue.lock);
1644 rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
1651 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1669 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1670 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1694 BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1697 } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1719 * @o: vlan_mac object
1728 struct bnx2x_vlan_mac_obj *o,
1744 if (!o->get_cam_offset(o, ®_elem->cam_offset)) {
1763 reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
1785 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1786 struct bnx2x_raw_obj *r = &o->raw;
1811 cam_obj = o;
1828 o->set_one_rule(bp, o, elem, idx,
1845 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1858 reg_elem = o->check_del(bp, o,
1863 o->put_cam_offset(o, reg_elem->cam_offset);
1884 cam_obj = o;
1890 reg_elem = o->check_del(bp, cam_obj,
1907 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1928 return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1942 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1945 struct bnx2x_raw_obj *raw = &o->raw;
1959 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1983 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1985 while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
2011 * @o: vlan_mac object info
2021 struct bnx2x_vlan_mac_obj *o,
2027 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
2056 p.vlan_mac_obj = o;
2068 read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
2072 list_for_each_entry(pos, &o->head, link) {
2081 bnx2x_vlan_mac_h_read_unlock(bp, o);
2088 bnx2x_vlan_mac_h_read_unlock(bp, o);
2114 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
2120 INIT_LIST_HEAD(&o->head);
2121 o->head_reader = 0;
2122 o->head_exe_request = false;
2123 o->saved_ramrod_flags = 0;
2125 o->macs_pool = macs_pool;
2126 o->vlans_pool = vlans_pool;
2128 o->delete_all = bnx2x_vlan_mac_del_all;
2129 o->restore = bnx2x_vlan_mac_restore;
2130 o->complete = bnx2x_complete_vlan_mac;
2131 o->wait = bnx2x_wait_vlan_mac;
2133 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
2582 struct bnx2x_rx_mode_obj *o)
2585 o->wait_comp = bnx2x_empty_rx_mode_wait;
2586 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2588 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2589 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2649 struct bnx2x_mcast_obj *o)
2651 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2652 o->raw.wait_comp(bp, &o->raw))
2672 struct bnx2x_mcast_obj *o,
2762 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2764 o->set_sched(o);
2772 * @o: multicast object info
2777 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2782 if (o->registry.aprox_match.vec[i])
2785 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2800 * @o: multicast object info
2804 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2806 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2809 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
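
bnx2x_mcast_get_next_bin and bnx2x_mcast_clear_first_bin above scan the approximate-match registry as an array of 64-bit words and return the index of the next set bit. A standalone sketch of that scan in plain C, without the BIT_VEC64_* macros (sizes and names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define VEC_WORDS 4                      /* 4 * 64 = 256 bins, for example */

/* Return the index of the first bit set at or after 'last', or -1. */
static int get_next_bin(const uint64_t *vec, int last)
{
	for (int i = last; i < VEC_WORDS * 64; i++)
		if (vec[i / 64] & (1ULL << (i % 64)))
			return i;
	return -1;
}

/* Clear the first set bit and return its index (or -1 if none). */
static int clear_first_bin(uint64_t *vec)
{
	int bit = get_next_bin(vec, 0);

	if (bit >= 0)
		vec[bit / 64] &= ~(1ULL << (bit % 64));
	return bit;
}

int main(void)
{
	uint64_t vec[VEC_WORDS] = { 0 };

	vec[1] |= 1ULL << 5;                          /* bin 69 */
	printf("next=%d\n", get_next_bin(vec, 0));    /* 69 */
	printf("cleared=%d\n", clear_first_bin(vec)); /* 69 */
	printf("next=%d\n", get_next_bin(vec, 0));    /* -1 */
	return 0;
}
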
2814 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2816 struct bnx2x_raw_obj *raw = &o->raw;
2831 struct bnx2x_mcast_obj *o, int idx,
2835 struct bnx2x_raw_obj *r = &o->raw;
2839 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2852 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2862 bin = bnx2x_mcast_clear_first_bin(o);
2871 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2876 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, bin);
2890 data->rules[idx].engine_id = o->engine_id;
2897 * @o: multicast object info
2904 struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
2911 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2912 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2915 o->set_one_rule(bp, o, cnt, &cfg_data,
2925 if (cnt >= o->max_cmd_len)
2935 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2946 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2958 if (cnt >= o->max_cmd_len)
2970 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2976 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2988 if (cnt >= o->max_cmd_len)
3000 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
3003 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
3007 /* If o->hdl_restore returned -1 we are done */
3016 struct bnx2x_mcast_obj *o,
3026 memcpy(cur, o->registry.aprox_match.vec,
3075 o->total_pending_num -= (o->max_cmd_len + mac_cnt);
3076 o->total_pending_num += cnt;
3078 DP(BNX2X_MSG_SP, "o->total_pending_num=%d\n", o->total_pending_num);
3083 struct bnx2x_mcast_obj *o,
3098 bnx2x_mcast_hdl_pending_set_e2_convert(bp, o, cmd_pos);
3103 o->set_one_rule(bp, o, *cnt, &cfg_data, p_item->type);
3109 if (*cnt >= o->max_cmd_len)
3123 struct bnx2x_mcast_obj *o = p->mcast_obj;
3125 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
3129 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
3133 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
3137 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
3142 bnx2x_mcast_hdl_pending_set_e2(bp, o, cmd_pos, &cnt);
3160 if (cnt >= o->max_cmd_len)
3168 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3177 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
3189 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3195 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
3223 struct bnx2x_mcast_obj *o = p->mcast_obj;
3230 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
3234 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
3238 o->hdl_restore(bp, o, 0, &cnt);
3256 struct bnx2x_mcast_obj *o = p->mcast_obj;
3257 int reg_sz = o->get_registry_size(o);
3262 o->set_registry_size(o, 0);
3283 o->set_registry_size(o, reg_sz + p->mcast_list_len);
3295 o->set_registry_size(o, reg_sz + p->mcast_list_len);
3296 o->total_pending_num += o->max_cmd_len;
3305 o->total_pending_num += p->mcast_list_len;
3315 struct bnx2x_mcast_obj *o = p->mcast_obj;
3317 o->set_registry_size(o, old_num_bins);
3318 o->total_pending_num -= p->mcast_list_len;
3321 o->total_pending_num -= o->max_cmd_len;
3349 * @o: multicast object info
3357 struct bnx2x_mcast_obj *o)
3363 elem = o->registry.aprox_match.vec[i];
3368 o->set_registry_size(o, cnt);
3378 struct bnx2x_mcast_obj *o = p->mcast_obj;
3389 if (list_empty(&o->pending_cmds_head))
3390 o->clear_sched(o);
3404 o->total_pending_num -= cnt;
3407 WARN_ON(o->total_pending_num < 0);
3408 WARN_ON(cnt > o->max_cmd_len);
3427 if (!o->total_pending_num)
3428 bnx2x_mcast_refresh_registry_e2(bp, o);
3489 struct bnx2x_mcast_obj *o,
3504 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3510 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3515 for (bit = bnx2x_mcast_get_next_bin(o, 0);
3517 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3532 struct bnx2x_mcast_obj *o = p->mcast_obj;
3533 struct bnx2x_raw_obj *r = &o->raw;
3546 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3554 memset(o->registry.aprox_match.vec, 0,
3555 sizeof(o->registry.aprox_match.vec));
3559 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3572 memset(o->registry.aprox_match.vec, 0,
3573 sizeof(o->registry.aprox_match.vec));
3585 struct bnx2x_mcast_obj *o = p->mcast_obj;
3586 int reg_sz = o->get_registry_size(o);
3596 o->set_registry_size(o, 0);
3612 if (p->mcast_list_len > o->max_cmd_len) {
3614 o->max_cmd_len);
3623 o->set_registry_size(o, p->mcast_list_len);
3633 * Therefore each non-empty command will consume o->max_cmd_len.
3636 o->total_pending_num += o->max_cmd_len;
3646 struct bnx2x_mcast_obj *o = p->mcast_obj;
3648 o->set_registry_size(o, old_num_macs);
3655 o->total_pending_num -= o->max_cmd_len;
3659 struct bnx2x_mcast_obj *o, int idx,
3663 struct bnx2x_raw_obj *r = &o->raw;
3716 * @o: multicast info
3726 struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
3734 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3736 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3754 struct bnx2x_mcast_obj *o = p->mcast_obj;
3759 if (list_empty(&o->pending_cmds_head))
3763 cmd_pos = list_first_entry(&o->pending_cmds_head,
3770 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3785 o->hdl_restore(bp, o, 0, &cnt);
3823 * @o: multicast info
3831 struct bnx2x_mcast_obj *o)
3833 struct bnx2x_raw_obj *raw = &o->raw;
3846 if (!list_empty(&o->registry.exact_match.macs))
3864 &o->registry.exact_match.macs);
3867 elem = list_first_entry(&o->registry.exact_match.macs,
3871 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3881 struct bnx2x_mcast_obj *o = p->mcast_obj;
3882 struct bnx2x_raw_obj *raw = &o->raw;
3891 for (i = 0; i < o->max_cmd_len ; i++)
3900 if (list_empty(&o->pending_cmds_head))
3901 o->clear_sched(o);
3907 /* For 57710 every command has o->max_cmd_len length to ensure that
3910 o->total_pending_num -= o->max_cmd_len;
3914 WARN_ON(cnt > o->max_cmd_len);
3925 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3956 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3958 return o->registry.exact_match.num_macs_set;
3961 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3963 return o->registry.aprox_match.num_bins_set;
3966 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3969 o->registry.exact_match.num_macs_set = n;
3972 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3975 o->registry.aprox_match.num_bins_set = n;
3982 struct bnx2x_mcast_obj *o = p->mcast_obj;
3983 struct bnx2x_raw_obj *r = &o->raw;
3989 old_reg_size = o->get_registry_size(o);
3992 rc = o->validate(bp, p, cmd);
3997 if ((!p->mcast_list_len) && (!o->check_sched(o)))
4000 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
4001 o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
4007 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
4008 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
4024 rc = o->config_mcast(bp, p, cmd);
4030 rc = o->wait_comp(bp, o);
4039 o->revert(bp, p, old_reg_size, cmd);
4044 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
4047 clear_bit(o->sched_state, o->raw.pstate);
4051 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
4054 set_bit(o->sched_state, o->raw.pstate);
4058 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
4060 return !!test_bit(o->sched_state, o->raw.pstate);
4063 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
4065 return o->raw.check_pending(&o->raw) || o->check_sched(o);
4210 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
4215 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
4221 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
4228 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
4235 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
4240 cur_credit = atomic_read(&o->credit);
4245 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
4252 struct bnx2x_credit_pool_obj *o,
4263 if (!o->pool_mirror[vec])
4270 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
4272 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
4273 *offset = o->base_pool_offset + idx;
4282 struct bnx2x_credit_pool_obj *o,
4285 if (offset < o->base_pool_offset)
4288 offset -= o->base_pool_offset;
4290 if (offset >= o->pool_sz)
4294 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
4300 struct bnx2x_credit_pool_obj *o,
4307 struct bnx2x_credit_pool_obj *o,
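
The credit-pool matches above take and return credit with "decrement only if enough remains" and "add only if the pool size is not exceeded" primitives (__atomic_dec_ifmoe / __atomic_add_ifless), plus a pool_mirror bit vector for CAM offsets. A hedged sketch of just the credit semantics using a C11 compare-exchange loop, not the driver's helpers:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct credit_pool {
	atomic_int credit;   /* credits currently available */
	int pool_sz;         /* total credits in the pool   */
};

/* Take 'cnt' credits only if that leaves the counter >= 0. */
static bool pool_get(struct credit_pool *p, int cnt)
{
	int cur = atomic_load(&p->credit);

	while (cur >= cnt) {
		if (atomic_compare_exchange_weak(&p->credit, &cur, cur - cnt))
			return true;    /* cur is reloaded on CAS failure */
	}
	return false;                   /* not enough credit */
}

/* Return 'cnt' credits, refusing to exceed the pool size. */
static bool pool_put(struct credit_pool *p, int cnt)
{
	int cur = atomic_load(&p->credit);

	while (cur + cnt <= p->pool_sz) {
		if (atomic_compare_exchange_weak(&p->credit, &cur, cur + cnt))
			return true;
	}
	return false;                   /* would overflow the pool */
}

int main(void)
{
	struct credit_pool p = { .credit = 2, .pool_sz = 2 };

	printf("get 1: %d\n", pool_get(&p, 1));  /* 1 */
	printf("get 2: %d\n", pool_get(&p, 2));  /* 0: only 1 left */
	printf("put 1: %d\n", pool_put(&p, 1));  /* 1 */
	return 0;
}
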
4486 struct bnx2x_rss_config_obj *o = p->rss_obj;
4487 struct bnx2x_raw_obj *r = &o->raw;
4561 data->rss_engine_id = o->engine_id;
4570 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4605 struct bnx2x_rss_config_obj *o = p->rss_obj;
4606 struct bnx2x_raw_obj *r = &o->raw;
4617 rc = o->config_rss(bp, p);
4660 struct bnx2x_queue_sp_obj *o = params->q_obj;
4662 unsigned long *pending = &o->pending;
4665 rc = o->check_transition(bp, o, params);
4672 DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
4673 pending_bit = o->set_pending(o, params);
4674 DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
4678 o->complete_cmd(bp, o, pending_bit);
4681 rc = o->send_cmd(bp, params);
4683 o->next_state = BNX2X_Q_STATE_MAX;
4690 rc = o->wait_comp(bp, o, pending_bit);
4720 struct bnx2x_queue_sp_obj *o,
4723 return bnx2x_state_wait(bp, cmd, &o->pending);
4730 * @o: queue info
4736 struct bnx2x_queue_sp_obj *o,
4739 unsigned long cur_pending = o->pending;
4743 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
4744 o->state, cur_pending, o->next_state);
4748 if (o->next_tx_only >= o->max_cos)
4753 o->next_tx_only, o->max_cos);
4757 cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4759 if (o->next_tx_only) /* print num tx-only if any exist */
4761 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4763 o->state = o->next_state;
4764 o->num_tx_only = o->next_tx_only;
4765 o->next_state = BNX2X_Q_STATE_MAX;
4767 /* It's important that o->state and o->next_state are
4768 * updated before o->pending.
4772 clear_bit(cmd, &o->pending);
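
bnx2x_queue_state_change and its completion handler above follow a fixed sequence: validate the requested transition, record the command as a pending bit, send the ramrod, and clear the bit only after the state has been advanced to next_state. A simplified synchronous model of that sequence (invented states and commands, no hardware and no barriers):

#include <stdio.h>

enum q_cmd   { Q_CMD_INIT, Q_CMD_SETUP, Q_CMD_MAX };
enum q_state { Q_STATE_RESET, Q_STATE_INITIALIZED, Q_STATE_ACTIVE };

struct queue_obj {
	enum q_state state, next_state;
	unsigned long pending;          /* one bit per in-flight command */
};

static int check_transition(struct queue_obj *o, enum q_cmd cmd)
{
	/* a real table is much larger; two legal edges are enough here */
	if (o->state == Q_STATE_RESET && cmd == Q_CMD_INIT)
		o->next_state = Q_STATE_INITIALIZED;
	else if (o->state == Q_STATE_INITIALIZED && cmd == Q_CMD_SETUP)
		o->next_state = Q_STATE_ACTIVE;
	else
		return -1;              /* invalid transition */
	return 0;
}

static void complete_cmd(struct queue_obj *o, enum q_cmd cmd)
{
	/* state must be updated before the pending bit is dropped */
	o->state = o->next_state;
	o->pending &= ~(1UL << cmd);
}

static int queue_state_change(struct queue_obj *o, enum q_cmd cmd)
{
	if (check_transition(o, cmd))
		return -1;
	o->pending |= 1UL << cmd;       /* command is now in flight */
	/* ... send the command here; its completion calls complete_cmd() */
	complete_cmd(o, cmd);           /* pretend it completed synchronously */
	return 0;
}

int main(void)
{
	struct queue_obj o = { Q_STATE_RESET, Q_STATE_RESET, 0 };

	printf("init:  %d\n", queue_state_change(&o, Q_CMD_INIT));   /* 0 */
	printf("setup: %d\n", queue_state_change(&o, Q_CMD_SETUP));  /* 0 */
	printf("setup: %d\n", queue_state_change(&o, Q_CMD_SETUP));  /* -1 */
	return 0;
}
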
4792 struct bnx2x_queue_sp_obj *o,
4797 gen_data->client_id = o->cl_id;
4813 gen_data->func_id = o->func_id;
4827 static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4865 static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4879 static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
5006 struct bnx2x_queue_sp_obj *o = params->q_obj;
5012 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
5023 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
5034 for (cos = 0; cos < o->max_cos; cos++) {
5036 o->cids[cos], cos);
5038 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
5042 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
5052 struct bnx2x_queue_sp_obj *o = params->q_obj;
5054 (struct client_init_ramrod_data *)o->rdata;
5055 dma_addr_t data_mapping = o->rdata_mapping;
5070 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
5078 struct bnx2x_queue_sp_obj *o = params->q_obj;
5080 (struct client_init_ramrod_data *)o->rdata;
5081 dma_addr_t data_mapping = o->rdata_mapping;
5097 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
5105 struct bnx2x_queue_sp_obj *o = params->q_obj;
5107 (struct tx_queue_init_ramrod_data *)o->rdata;
5108 dma_addr_t data_mapping = o->rdata_mapping;
5114 if (cid_index >= o->max_cos) {
5116 o->cl_id, cid_index);
5131 o->cids[cid_index], rdata->general.client_id,
5140 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
5220 struct bnx2x_queue_sp_obj *o = params->q_obj;
5222 (struct client_update_ramrod_data *)o->rdata;
5223 dma_addr_t data_mapping = o->rdata_mapping;
5228 if (cid_index >= o->max_cos) {
5230 o->cl_id, cid_index);
5238 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
5247 o->cids[cid_index], U64_HI(data_mapping),
5317 struct bnx2x_queue_sp_obj *o = params->q_obj;
5319 (struct tpa_update_ramrod_data *)o->rdata;
5320 dma_addr_t data_mapping = o->rdata_mapping;
5329 bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
5336 ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
5345 o->cids[BNX2X_PRIMARY_CID_INDEX],
5353 struct bnx2x_queue_sp_obj *o = params->q_obj;
5356 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
5363 struct bnx2x_queue_sp_obj *o = params->q_obj;
5366 if (cid_idx >= o->max_cos) {
5368 o->cl_id, cid_idx);
5373 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
5379 struct bnx2x_queue_sp_obj *o = params->q_obj;
5382 if (cid_index >= o->max_cos) {
5384 o->cl_id, cid_index);
5389 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
5395 struct bnx2x_queue_sp_obj *o = params->q_obj;
5398 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
5482 * @o: queue info
5495 struct bnx2x_queue_sp_obj *o,
5498 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
5502 u8 next_tx_only = o->num_tx_only;
5508 o->pending = 0;
5509 o->next_state = BNX2X_Q_STATE_MAX;
5515 if (o->pending) {
5517 o->pending);
5573 next_tx_only = o->num_tx_only + 1;
5596 next_tx_only = o->num_tx_only - 1;
5623 if (o->num_tx_only == 0)
5650 o->next_state = next_state;
5651 o->next_tx_only = next_tx_only;
5715 struct bnx2x_func_sp_obj *o)
5718 if (o->pending)
5721 /* ensure the order of reading of o->pending and o->state
5722 * o->pending should be read first
5726 return o->state;
5730 struct bnx2x_func_sp_obj *o,
5733 return bnx2x_state_wait(bp, cmd, &o->pending);
5740 * @o: function info
5747 struct bnx2x_func_sp_obj *o,
5750 unsigned long cur_pending = o->pending;
5754 cmd, BP_FUNC(bp), o->state,
5755 cur_pending, o->next_state);
5761 cmd, BP_FUNC(bp), o->next_state);
5763 o->state = o->next_state;
5764 o->next_state = BNX2X_F_STATE_MAX;
5766 /* It's important that o->state and o->next_state are
5767 * updated before o->pending.
5771 clear_bit(cmd, &o->pending);
5781 * @o: function info
5787 struct bnx2x_func_sp_obj *o,
5793 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5801 * @o: function info
5813 struct bnx2x_func_sp_obj *o,
5816 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5823 o->pending = 0;
5824 o->next_state = BNX2X_F_STATE_MAX;
5830 if (o->pending)
5855 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5859 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5866 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5870 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5879 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5883 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5898 o->next_state = next_state;
5989 struct bnx2x_func_sp_obj *o = params->f_obj;
5990 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
6046 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
6109 struct bnx2x_func_sp_obj *o = params->f_obj;
6110 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
6132 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
6140 struct bnx2x_func_sp_obj *o = params->f_obj;
6142 (struct function_start_data *)o->rdata;
6143 dma_addr_t data_mapping = o->rdata_mapping;
6202 struct bnx2x_func_sp_obj *o = params->f_obj;
6204 (struct function_update_data *)o->rdata;
6205 dma_addr_t data_mapping = o->rdata_mapping;
6281 struct bnx2x_func_sp_obj *o = params->f_obj;
6283 (struct function_update_data *)o->afex_rdata;
6284 dma_addr_t data_mapping = o->afex_rdata_mapping;
6320 struct bnx2x_func_sp_obj *o = params->f_obj;
6322 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
6371 struct bnx2x_func_sp_obj *o = params->f_obj;
6373 (struct flow_control_configuration *)o->rdata;
6374 dma_addr_t data_mapping = o->rdata_mapping;
6406 struct bnx2x_func_sp_obj *o = params->f_obj;
6408 (struct set_timesync_ramrod_data *)o->rdata;
6409 dma_addr_t data_mapping = o->rdata_mapping;
6506 struct bnx2x_func_sp_obj *o = params->f_obj;
6509 unsigned long *pending = &o->pending;
6511 mutex_lock(&o->one_pending_mutex);
6514 rc = o->check_transition(bp, o, params);
6518 mutex_unlock(&o->one_pending_mutex);
6520 mutex_lock(&o->one_pending_mutex);
6521 rc = o->check_transition(bp, o, params);
6524 mutex_unlock(&o->one_pending_mutex);
6529 mutex_unlock(&o->one_pending_mutex);
6538 bnx2x_func_state_change_comp(bp, o, cmd);
6539 mutex_unlock(&o->one_pending_mutex);
6542 rc = o->send_cmd(bp, params);
6544 mutex_unlock(&o->one_pending_mutex);
6547 o->next_state = BNX2X_F_STATE_MAX;
6554 rc = o->wait_comp(bp, o, cmd);
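
The closing bnx2x_func_state_change matches serialize function-level commands with one_pending_mutex and, when the transition is refused but the caller allows a retry, drop the mutex, yield, and re-check once before giving up. A loose userspace sketch of that shape, with a pthread mutex and invented names:

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

struct func_obj {
	pthread_mutex_t one_pending_mutex;
	bool busy;                      /* a previous command still pending */
};

static int check_transition(struct func_obj *o)
{
	return o->busy ? -16 /* busy */ : 0;
}

/* Validate under the mutex; if busy and the caller allows a retry,
 * release the lock, give the pending command a chance to finish, and
 * check one more time before failing. */
static int func_state_change(struct func_obj *o, bool allow_retry)
{
	int rc;

	pthread_mutex_lock(&o->one_pending_mutex);
	rc = check_transition(o);
	if (rc == -16 && allow_retry) {
		pthread_mutex_unlock(&o->one_pending_mutex);
		sched_yield();          /* let the pending command complete */
		pthread_mutex_lock(&o->one_pending_mutex);
		rc = check_transition(o);
	}
	if (rc) {
		pthread_mutex_unlock(&o->one_pending_mutex);
		return rc;
	}
	/* ... mark the command pending and send it here ... */
	pthread_mutex_unlock(&o->one_pending_mutex);
	return 0;
}

int main(void)
{
	struct func_obj o = { PTHREAD_MUTEX_INITIALIZER, false };

	printf("rc=%d\n", func_state_change(&o, true));   /* 0 */
	o.busy = true;
	printf("rc=%d\n", func_state_change(&o, true));   /* -16, still busy */
	return 0;
}
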