/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2014 QLogic Corporation
 * The contents of this file are subject to the terms of the
 * QLogic End User License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
 * QLogic_End_User_Software_License.txt
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

#include "bnxe.h"

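/*
 * Note: the mm_* L4 (TOE) callbacks in this file are unimplemented stubs.
 * Apart from the TOE lock enter/exit wrappers, each routine simply traps
 * via BnxeDbgBreak() (and returns 0 where a return value is required) if
 * it is ever invoked.
 */
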
void
mm_acquire_tcp_lock(
    lm_device_t *pdev,
    lm_tcp_con_t *tcp_con)
{
    BnxeDbgBreak((um_device_t *)pdev);
}


void
mm_release_tcp_lock(
    lm_device_t *pdev,
    lm_tcp_con_t *tcp_con)
{
    BnxeDbgBreak((um_device_t *)pdev);
}


void MM_ACQUIRE_TOE_LOCK(lm_device_t *pDev)
{
    BNXE_LOCK_ENTER_TOE((um_device_t *)pDev);
}


void MM_RELEASE_TOE_LOCK(lm_device_t *pDev)
{
    BNXE_LOCK_EXIT_TOE((um_device_t *)pDev);
}


void MM_ACQUIRE_TOE_GRQ_LOCK_DPC(lm_device_t *pdev, u8_t idx)
{
    BnxeDbgBreak((um_device_t *)pdev);
}


void MM_RELEASE_TOE_GRQ_LOCK_DPC(lm_device_t *pdev, u8_t idx)
{
    BnxeDbgBreak((um_device_t *)pdev);
}


void MM_ACQUIRE_TOE_GRQ_LOCK(lm_device_t *pdev, u8_t idx)
{
    BnxeDbgBreak((um_device_t *)pdev);
}


void MM_RELEASE_TOE_GRQ_LOCK(lm_device_t *pdev, u8_t idx)
{
    BnxeDbgBreak((um_device_t *)pdev);
}


void mm_tcp_complete_path_upload_request(
    struct _lm_device_t * pdev,
    lm_path_state_t * path
    )
{
    BnxeDbgBreak((um_device_t *)pdev);
}


void mm_tcp_complete_neigh_upload_request(
    struct _lm_device_t * pdev,
    lm_neigh_state_t * neigh
    )
{
    BnxeDbgBreak((um_device_t *)pdev);
}


void mm_tcp_comp_slow_path_request(
    struct _lm_device_t *pdev,
    lm_tcp_state_t *tcp,
    lm_tcp_slow_path_request_t *sp_request)
{
    BnxeDbgBreak((um_device_t *)pdev);
}


void mm_tcp_complete_bufs(
    struct _lm_device_t *pdev,
    lm_tcp_state_t *tcp,
    lm_tcp_con_t *tcp_con,    /* Rx OR Tx connection */
    s_list_t *buf_list,       /* list of lm_tcp_buffer_t */
    lm_status_t lm_status     /* completion status for all given TBs */)
{
    BnxeDbgBreak((um_device_t *)pdev);
}


u8_t mm_tcp_indicating_bufs(
    lm_tcp_con_t * con /* connection to be checked */
    )
{
    BnxeDbgBreak(NULL);
    return 0;
}


mm_tcp_abort_bufs(IN struct _lm_device_t * pdev,IN lm_tcp_state_t * tcp,IN lm_tcp_con_t * con,IN lm_status_t status)138 void mm_tcp_abort_bufs (
139 IN struct _lm_device_t * pdev, /* device handle */
140 IN lm_tcp_state_t * tcp, /* L4 state handle */
141 IN lm_tcp_con_t * con, /* connection handle */
142 IN lm_status_t status /* status to abort buffers with */
143 )
144 {
145 BnxeDbgBreak((um_device_t *)pdev);
146 }
147
148
void mm_tcp_indicate_rst_received(
    IN lm_device_t * pdev,
    IN lm_tcp_state_t * tcp
    )
{
    BnxeDbgBreak((um_device_t *)pdev);
}


void mm_tcp_indicate_fin_received(
    IN struct _lm_device_t * pdev,  /* device handle */
    IN lm_tcp_state_t * tcp         /* L4 state handle */
    )
{
    BnxeDbgBreak((um_device_t *)pdev);
}


void mm_tcp_graceful_disconnect_done(
    IN struct _lm_device_t * pdev,  /* device handle */
    IN lm_tcp_state_t * tcp,        /* L4 state handle */
    IN lm_status_t status           /* May be SUCCESS, ABORTED or UPLOAD IN PROGRESS */
    )
{
    BnxeDbgBreak((um_device_t *)pdev);
}


u32_t mm_tcp_rx_indicate_gen_buf(
    struct _lm_device_t * pdev,
    lm_tcp_state_t * tcp,
    lm_frag_list_t * frag_list,
    void * return_buffer_ctx
    )
{
    BnxeDbgBreak((um_device_t *)pdev);
    return 0;
}


void mm_tcp_rx_indicate_gen(
    struct _lm_device_t * pdev,
    lm_tcp_state_t * tcp
    )
{
    BnxeDbgBreak((um_device_t *)pdev);
}


static void _schedule_work_item_for_alloc_gen_bufs(um_device_t * pdev)
{
    BnxeDbgBreak((um_device_t *)pdev);
}


static void _schedule_work_item_for_free_gen_bufs(
    um_device_t * pdev,
    lm_tcp_gen_buf_t * gen_buf
    )
{
    BnxeDbgBreak((um_device_t *)pdev);
}


u32_t mm_tcp_get_gen_bufs(
    struct _lm_device_t * pdev,
    d_list_t * gb_list,
    u32_t nbufs,
    u8_t sb_idx
    )
{
    BnxeDbgBreak((um_device_t *)pdev);
    return 0;
}


void mm_tcp_return_gen_bufs(
    lm_device_t * pdev,
    lm_tcp_gen_buf_t * gen_buf,
    u32_t flags,
    u8_t grq_idxxx
    )
{
    BnxeDbgBreak((um_device_t *)pdev);
}


void mm_tcp_return_list_of_gen_bufs(
    struct _lm_device_t * pdev,
    d_list_t * returned_list_of_gen_bufs,
    u32_t flags,
    u8_t grq_idxxx
    )
{
    BnxeDbgBreak((um_device_t *)pdev);
}


u32_t mm_tcp_copy_to_tcp_buf(
    lm_device_t * pdev,
    lm_tcp_state_t * tcp_state,
    lm_tcp_buffer_t * tcp_buf,  /* TCP buffer to copy to */
    u8_t * mem_buf,             /* Memory buffer to copy from */
    u32_t tcp_buf_offset,
    u32_t nbytes
    )
{
    BnxeDbgBreak((um_device_t *)pdev);
    return 0;
}


void
mm_tcp_indicate_retrieve_indication(
    lm_device_t *pdev,
    lm_tcp_state_t *tcp_state,
    l4_upload_reason_t upload_reason)
{
    BnxeDbgBreak((um_device_t *)pdev);
}


void mm_tcp_update_required_gen_bufs(
    struct _lm_device_t * pdev,
    u32_t new_mss,
    u32_t old_mss,
    u32_t new_initial_rcv_wnd,
    u32_t old_initial_rcv_wnd)
{
    BnxeDbgBreak((um_device_t *)pdev);
}


lm_status_t mm_tcp_post_empty_slow_path_request(
    struct _lm_device_t * pdev,
    lm_tcp_state_t * tcp,
    u32_t request_type)
{
    BnxeDbgBreak((um_device_t *)pdev);
    return 0;
}


void mm_tcp_del_tcp_state(
    struct _lm_device_t * pdev,
    lm_tcp_state_t * tcp)
{
    BnxeDbgBreak((um_device_t *)pdev);
}


u32_t mm_tcp_rx_peninsula_to_rq_copy_dmae(
    struct _lm_device_t * pdev,
    lm_tcp_state_t * tcp,
    lm_address_t gen_buf_phys,
    u32_t gen_buf_offset,
    lm_tcp_buffer_t * tcp_buf,
    u32_t tcp_buf_offset,
    u32_t nbytes
    )
{
    BnxeDbgBreak((um_device_t *)pdev);
    return 0;
}
