/*******************************************************************************
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*
* Copyright 2014 QLogic Corporation
* The contents of this file are subject to the terms of the
* QLogic End User License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the License at
* http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
* QLogic_End_User_Software_License.txt
* See the License for the specific language governing permissions
* and limitations under the License.
*
*
* Module Description:
*
*
* History:
*    02/05/07 Alon Elhanani    Inception.
******************************************************************************/

#include "lm5710.h"

// converts index to DMAE command register name define
u32_t lm_dmae_idx_to_go_cmd( u8_t idx )
{
    u32_t ret = 0;

    switch( idx )
    {
    case 0:  ret = DMAE_REG_GO_C0;  break;
    case 1:  ret = DMAE_REG_GO_C1;  break;
    case 2:  ret = DMAE_REG_GO_C2;  break;
    case 3:  ret = DMAE_REG_GO_C3;  break;
    case 4:  ret = DMAE_REG_GO_C4;  break;
    case 5:  ret = DMAE_REG_GO_C5;  break;
    case 6:  ret = DMAE_REG_GO_C6;  break;
    case 7:  ret = DMAE_REG_GO_C7;  break;
    case 8:  ret = DMAE_REG_GO_C8;  break;
    case 9:  ret = DMAE_REG_GO_C9;  break;
    case 10: ret = DMAE_REG_GO_C10; break;
    case 11: ret = DMAE_REG_GO_C11; break;
    case 12: ret = DMAE_REG_GO_C12; break;
    case 13: ret = DMAE_REG_GO_C13; break;
    case 14: ret = DMAE_REG_GO_C14; break;
    case 15: ret = DMAE_REG_GO_C15; break;
    default:
        break;
    }
    return ret;
}

/**
 * @defgroup LockingPolicy Locking Policy
 * @{
 */

/**lm_dmae_locking_policy_hwlock_id_for_resource
 * Return the hwlock for some protected resource.
 *
 *
 * @param pdev the device to use
 * @param resource the resource
 *
 * @return u8_t the hwlock for the given resource.
 */
static u8_t lm_dmae_locking_policy_hwlock_id_for_resource(struct _lm_device_t* pdev, IN const u32_t resource)
{
    switch (resource)
    {
    case LM_PROTECTED_RESOURCE_DMAE_TOE:
        {
            return HW_LOCK_RESOURCE_PORT0_DMAE_COPY_CMD + PORT_ID(pdev);
        }
        break;
    default:
        {
            DbgBreakMsg("HW lock for resource does not exist.\n");
            return LM_DMAE_NO_HWLOCK;
        }
        break;
    }
}


lm_status_t lm_dmae_locking_policy_create( struct _lm_device_t* pdev,
                                           IN const u32_t resource,
                                           IN const lm_dmae_locking_policy_type_t type,
                                           OUT lm_dmae_locking_policy_t* policy)
{
    mm_mem_zero(policy, sizeof(lm_dmae_locking_policy_t));

    if (type > LM_DMAE_LOCKING_POLICY_TYPE_NONE)
    {
        mm_init_lock(pdev, &policy->spinlock);
    }

    if (type == LM_DMAE_LOCKING_POLICY_TYPE_INTER_PF)
    {
        policy->hwlock = lm_dmae_locking_policy_hwlock_id_for_resource(pdev, resource);
    }

    return LM_STATUS_SUCCESS;
}

#ifdef _VBD_
/*28158 is 'No IRQL was saved into '_Param_(2)->spinlock.irql'. The IRQL is saved by the call to mm_acquire_lock.*/
#pragma warning(push)
#pragma warning(disable:28158)

__drv_maxIRQL(DISPATCH_LEVEL)
__drv_at(context->locking_policy->spinlock.irql, __drv_savesIRQL)
__drv_setsIRQL(DISPATCH_LEVEL)
#endif
lm_status_t lm_dmae_locking_policy_lock(struct _lm_device_t* pdev, lm_dmae_locking_policy_t* locking_policy)
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;

    lm_status = mm_acquire_lock(&locking_policy->spinlock);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        DbgBreakMsg("Failed to acquire spinlock.\n");
        return lm_status;
    }

    if (LM_DMAE_NO_HWLOCK != locking_policy->hwlock)
    {
        lm_status = lm_hw_lock(pdev, locking_policy->hwlock, TRUE);
        if (LM_STATUS_SUCCESS != lm_status)
        {
            DbgBreakMsg("Failed to acquire HW lock.\n");

            //roll back the spinlock acquisition. Note that we return the original
            //HW-lock failure status (not the rollback status), so the caller knows
            //the lock was not acquired.
            if (LM_STATUS_SUCCESS != mm_release_lock(&locking_policy->spinlock))
            {
                DbgBreakMsg("Failed to roll-back after locking failure.\n");
            }

            return lm_status;
        }
    }

    return lm_status;
}
#ifdef _VBD_
#pragma warning(pop)

/*28157 is 'The IRQL in '_Param_(2)->spinlock.irql' was never restored'. The IRQL is restored by the call to mm_release_lock.*/
#pragma warning(push)
#pragma warning(disable:28157)
#if defined(NTDDI_WIN8)
_IRQL_requires_(DISPATCH_LEVEL)
__drv_at(context->locking_policy->spinlock.irql, __drv_restoresIRQL )
#endif
#endif
lm_status_t lm_dmae_locking_policy_unlock(struct _lm_device_t* pdev, lm_dmae_locking_policy_t* locking_policy)
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;

    if (LM_DMAE_NO_HWLOCK != locking_policy->hwlock)
    {
        lm_status = lm_hw_unlock(pdev, locking_policy->hwlock);
        if (LM_STATUS_SUCCESS != lm_status)
        {
            DbgBreakMsg("Failed to release HW lock.\n");
            return lm_status;
        }
    }

    lm_status = mm_release_lock(&locking_policy->spinlock);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        DbgBreakMsg("Failed to release spinlock.\n");

        if (LM_DMAE_NO_HWLOCK != locking_policy->hwlock)
        {
            //try to re-acquire the HW lock, so at least we'll be in a consistent state.
            //Note that we return the original release-failure status either way, so
            //the caller knows the unlock did not complete.
            if (LM_STATUS_SUCCESS != lm_hw_lock(pdev, locking_policy->hwlock, TRUE))
            {
                DbgBreakMsg("Failed to roll-back after release failure.\n"); //This is a double-fault. Don't try to recover.
            }
        }

        return lm_status;
    }

    return lm_status;
}


#ifdef _VBD_
#pragma warning(pop)
#endif
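
/*
 * A minimal usage sketch of the locking-policy API (illustration only: the
 * resource/type values below are just one valid combination, and error
 * handling is elided):
 *
 * @code
 * lm_dmae_locking_policy_t policy = {0};
 *
 * lm_dmae_locking_policy_create(pdev,
 *                               LM_PROTECTED_RESOURCE_DMAE_TOE,
 *                               LM_DMAE_LOCKING_POLICY_TYPE_INTER_PF,
 *                               &policy);
 *
 * if (LM_STATUS_SUCCESS == lm_dmae_locking_policy_lock(pdev, &policy))
 * {
 *     // ...access the resource protected by 'policy'...
 *     lm_dmae_locking_policy_unlock(pdev, &policy);
 * }
 * @endcode
 */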
/**
 * @}
 */

/**
 * @defgroup DMAE_Operation DMAE operation
 * @{
 */

/**lm_dmae_opcode
 * Construct a DMAE command opcode according to HSI and given
 * parameters.
 *
 *
 * @param pdev the device to use
 * @param source the source of the operation
 * @param dest the destination of the operation
 * @param b_complete_to_host TRUE if the completion value of the
 *                           operation should be written to host
 *                           memory, FALSE if to GRC.
 * @param b_resume_prev TRUE if this operation should resume a
 *                      previous operation, FALSE if the source
 *                      address should be used.
 * @param b_change_endianity TRUE if the operation should
 *                           byte-swap its data
 *
 * @return u32_t an opcode according to HSI rules.
 */
static u32_t
lm_dmae_opcode( struct _lm_device_t* pdev,
                IN const lm_dmae_address_t source,
                IN const lm_dmae_address_t dest,
                IN const u8_t b_complete_to_host,
                IN const u8_t b_resume_prev,
                IN const u8_t b_change_endianity)
{
    u32_t opcode = 0;

    opcode |= ((source.type == LM_DMAE_ADDRESS_GRC) ? 1 : 0) << DMAE_CMD_SRC_SHIFT;
    opcode |= ((dest.type == LM_DMAE_ADDRESS_GRC) ? 2 : 1) << DMAE_CMD_DST_SHIFT;
    opcode |= (!b_complete_to_host) << DMAE_CMD_C_DST_SHIFT;
    opcode |= 1 << DMAE_CMD_C_TYPE_ENABLE_SHIFT;
    opcode |= 0 << DMAE_CMD_C_TYPE_CRC_ENABLE_SHIFT;
    opcode |= (b_change_endianity ? 3 : 2) << DMAE_CMD_ENDIANITY_SHIFT;
    opcode |= PORT_ID(pdev) << DMAE_CMD_PORT_SHIFT;
    opcode |= 0 << DMAE_CMD_CRC_RESET_SHIFT;
    opcode |= (!b_resume_prev) << DMAE_CMD_SRC_RESET_SHIFT;
    opcode |= 1 << DMAE_CMD_DST_RESET_SHIFT;
    opcode |= VNIC_ID(pdev) << DMAE_CMD_E1HVN_SHIFT;

    return opcode;
}

/**lm_dmae_command_set_block
 * Set the source, destination and length of a DMAE command HSI
 * structure.
 *
 *
 * @param pdev the device to use
 * @param command the command to initialize
 * @param source the source of the operation
 * @param dest the destination of the operation
 * @param length the length, in DWORDS, of the operation
 */
static void
lm_dmae_command_set_block( struct _lm_device_t* pdev,
                           struct dmae_cmd* command,
                           IN const lm_dmae_address_t source,
                           IN const lm_dmae_address_t dest,
                           IN const u16_t length/*in DWORDS*/)
{
    u64_t source_offset = lm_dmae_address_native_offset(&source);
    u64_t dest_offset = lm_dmae_address_native_offset(&dest);

    command->src_addr_hi = U64_HI(source_offset);
    command->src_addr_lo = U64_LO(source_offset);

    command->dst_addr_hi = U64_HI(dest_offset);
    command->dst_addr_lo = U64_LO(dest_offset);

    command->len = length;
}


/**lm_dmae_initialize_command_by_block
 * Initialize an HSI DMAE command struct according to a driver
 * DMAE block data structure.
 *
 * @param pdev the device to use
 * @param context the context of the operation
 * @param command the command to initialize
 * @param block the DMAE block according to which the command
 *              will be initialized
 * @param completion_value the completion value that should be
 *                         written to the context's completion
 *                         word when this command completes.
 *
 * @return lm_status_t LM_STATUS_SUCCESS on success, some other
 *         failure value on failure.
 */
static lm_status_t
lm_dmae_initialize_command_by_block(struct _lm_device_t* pdev,
                                    lm_dmae_context_t* context,
                                    struct dmae_cmd* command,
                                    IN lm_dmae_block_t* block,
                                    IN const u32_t completion_value)
{
    command->opcode = lm_dmae_opcode(pdev, block->source, block->dest, TRUE, FALSE, context->change_endianity);

    lm_dmae_command_set_block(pdev, command, block->source, block->dest, block->length);

    command->comp_addr_hi = context->completion_word_paddr.as_u32.high;
    command->comp_addr_lo = context->completion_word_paddr.as_u32.low;

    command->comp_val = completion_value;

    return LM_STATUS_SUCCESS;
}

lm_status_t lm_dmae_operation_create( struct _lm_device_t* pdev,
                                      IN const lm_dmae_address_t source,
                                      IN const lm_dmae_address_t dest,
                                      IN const u16_t length,
                                      IN const u8_t replicate_source,
                                      IN const u8_t le32_swap,
                                      IN lm_dmae_context_t* context,
                                      OUT lm_dmae_operation_t* operation)
{
    lm_status_t lm_status = LM_STATUS_FAILURE;

    DbgBreakIf(LM_DMAE_MODE_SINGLE_BLOCK != context->mode);
    DbgBreakIf(0 == context->completion_word_paddr.as_u64);

    if( (LM_DMAE_ADDRESS_HOST_VIRT == source.type) && (LM_DMAE_ADDRESS_HOST_VIRT == dest.type) )
    {
        DbgBreakMsg("the intermediate buffer can be used for source or destination but not both.\n");
        return LM_STATUS_INVALID_PARAMETER;
    }

    mm_mem_zero(operation, sizeof(lm_dmae_operation_t));

    operation->mode = LM_DMAE_MODE_SINGLE_BLOCK;
    operation->b_replicate_source = replicate_source;
    operation->le32_swap = le32_swap;
    operation->context = context;
    operation->b_sync = TRUE;

    operation->blocks[0].source = source;
    operation->blocks[0].dest = dest;
    operation->blocks[0].length = length;

    lm_status = lm_dmae_initialize_command_by_block(pdev,
                                                    operation->context,
                                                    &operation->main_cmd,
                                                    &operation->blocks[0],
                                                    DMAE_COMPLETION_VAL);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        return lm_status;
    }

    return lm_status;
}
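
/*
 * A minimal sketch of a synchronous single-block copy (illustration only:
 * 'context' is assumed to be a single-block context created with
 * lm_dmae_context_create, and 'buf_paddr', 'grc_offset' and 'len_in_dwords'
 * are hypothetical placeholders):
 *
 * @code
 * lm_dmae_address_t src = lm_dmae_address(buf_paddr.as_u64, LM_DMAE_ADDRESS_HOST_PHYS);
 * lm_dmae_address_t dst = lm_dmae_address(grc_offset, LM_DMAE_ADDRESS_GRC);
 * lm_dmae_operation_t op = {0};
 *
 * if (LM_STATUS_SUCCESS == lm_dmae_operation_create(pdev, src, dst, len_in_dwords,
 *                                                   FALSE, FALSE, context, &op))
 * {
 *     // acquires the context, runs the operation and releases the context
 *     lm_dmae_context_execute(pdev, context, &op);
 * }
 * @endcode
 */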

/**lm_dmae_initialize_sgl_loader_command
 * Initialize the DMAE command HSI struct for an SGL loader
 * command.
 *
 * @param pdev the device to use
 * @param operation the operation which the command is a part of
 * @param command the command to initialize
 *
 * @return lm_status_t LM_STATUS_SUCCESS on success, some other
 *         failure value on failure.
 */
static lm_status_t
lm_dmae_initialize_sgl_loader_command( struct _lm_device_t* pdev,
                                       lm_dmae_operation_t* operation,
                                       struct dmae_cmd* command)
{
    lm_dmae_address_t source = lm_dmae_address( operation->executer_paddr.as_u64, LM_DMAE_ADDRESS_HOST_PHYS);
    lm_dmae_address_t dest = lm_dmae_address( DMAE_REG_CMD_MEM + operation->context->executer_channel*DMAE_CMD_SIZE*sizeof(u32_t),
                                              LM_DMAE_ADDRESS_GRC);


    command->opcode = lm_dmae_opcode(pdev, source, dest, FALSE, TRUE, operation->context->change_endianity);

    lm_dmae_command_set_block(pdev, command, source, dest, sizeof(struct dmae_cmd) / sizeof(u32_t));

    // Special handling for E1 HW DMAE operations: we write the size MINUS 1 here,
    // because when 'no reset' is on (the source-address reset bit is 0), the DMAE
    // advances its pointer by length + 1. Writing length-1 is safe as long as the
    // relevant data fits in length-1 dwords. In this specific case we copy a struct
    // of 14 dwords of which only the first 9 are interesting, so writing 13 as the
    // length is fine: 13 dwords are copied, the 9 we care about are among them, and
    // the next DMAE read starts at +14, which is exactly what we want.
    if( CHIP_IS_E1(pdev) )
    {
        --command->len;
    }


    command->comp_addr_lo = lm_dmae_idx_to_go_cmd(operation->context->executer_channel) / 4;
    command->comp_addr_hi = 0;

    command->comp_val = DMAE_GO_VALUE;

    return LM_STATUS_SUCCESS;
}

lm_dmae_operation_t* lm_dmae_operation_create_sgl( struct _lm_device_t* pdev,
                                                   IN const u8_t b_sync,
                                                   IN lm_dmae_context_t* context)
{
    lm_dmae_operation_t* operation = NULL;
    lm_address_t operation_phys_addr = {{0}};
    lm_address_t executer_phys_addr = {{0}};

    DbgBreakIf(LM_DMAE_MODE_SGL != context->mode);
    DbgBreakIf(0 == context->completion_word_paddr.as_u64);

    operation = mm_alloc_phys_mem(pdev, sizeof(lm_dmae_operation_t), &operation_phys_addr, PHYS_MEM_TYPE_NONCACHED, LM_RESOURCE_COMMON);

    if (CHK_NULL(operation))
    {
        DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
        return NULL;
    }

    mm_mem_zero(operation, sizeof(lm_dmae_operation_t));

    operation->mode = LM_DMAE_MODE_SGL;
    operation->context = context;
    operation->b_sync = b_sync;

    executer_phys_addr = operation_phys_addr;
    LM_INC64(&executer_phys_addr, OFFSETOF(lm_dmae_operation_t, executer_cmd[0]));
    operation->executer_paddr = executer_phys_addr;

    lm_dmae_initialize_sgl_loader_command(pdev, operation, &operation->main_cmd);

    return operation;
}

lm_status_t lm_dmae_operation_add_sge( struct _lm_device_t* pdev,
                                       lm_dmae_operation_t* operation,
                                       IN const lm_dmae_address_t source,
                                       IN const lm_dmae_address_t dest,
                                       IN const u16_t length)
{
    u8_t last_sge_idx = 0;
    u8_t new_sge_idx = 0;
    struct dmae_cmd* last_sge = NULL;
    lm_status_t lm_status = LM_STATUS_FAILURE;

    if( (LM_DMAE_ADDRESS_HOST_VIRT == source.type) && (LM_DMAE_ADDRESS_HOST_VIRT == dest.type) )
    {
        DbgBreakMsg("the intermediate buffer can be used for source or destination but not both.\n");
        return LM_STATUS_INVALID_PARAMETER;
    }

    new_sge_idx = operation->next_free_block;

    if (new_sge_idx >= ARRSIZE(operation->blocks))
    {
        DbgBreakMsg("Too many SGEs in DMAE operation");
        return LM_STATUS_INVALID_PARAMETER;
    }

    if (0 != operation->next_free_block)
    {
        last_sge_idx = operation->next_free_block-1;
        last_sge = &operation->executer_cmd[last_sge_idx];

        SET_FLAGS(last_sge->opcode, 1<<DMAE_CMD_C_DST_SHIFT);

        last_sge->comp_addr_lo = lm_dmae_idx_to_go_cmd(operation->context->main_channel) / 4;
        last_sge->comp_addr_hi = 0;

        last_sge->comp_val = DMAE_GO_VALUE;
    }

    operation->blocks[new_sge_idx].source = source;
    operation->blocks[new_sge_idx].dest = dest;
    operation->blocks[new_sge_idx].length = length;

    lm_status = lm_dmae_initialize_command_by_block(pdev,
                                                    operation->context,
                                                    &operation->executer_cmd[new_sge_idx],
                                                    &operation->blocks[new_sge_idx],
                                                    DMAE_SGL_COMPLETION_VAL);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        return lm_status;
    }

    operation->next_free_block++;

    return lm_status;
}

void lm_dmae_operation_clear_all_sges(lm_dmae_operation_t* operation)
{
    DbgBreakIf(LM_DMAE_MODE_SGL != operation->mode);

    operation->next_free_block = 0;
}
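
/*
 * A minimal sketch of building and running an SGL operation (illustration
 * only: 'context' is assumed to be an SGL context created with
 * lm_dmae_context_create_sgl, and the src/dst/len values are hypothetical
 * placeholders):
 *
 * @code
 * lm_dmae_operation_t* op = lm_dmae_operation_create_sgl(pdev, TRUE, context);
 *
 * if (NULL != op)
 * {
 *     lm_dmae_operation_add_sge(pdev, op, src1, dst1, len1);
 *     lm_dmae_operation_add_sge(pdev, op, src2, dst2, len2);
 *
 *     // b_sync was TRUE, so this waits for all SGEs to complete
 *     lm_dmae_context_execute(pdev, context, op);
 * }
 * @endcode
 */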

u8_t lm_dmae_operation_is_complete(IN lm_dmae_operation_t* operation)
{
    return operation->context->completion_word != operation->command_id;
}

/**lm_dmae_operation_wait
 * Wait for an operation to finish. Note that this function
 * busy-waits and does not yield the CPU, so it can be used at
 * high IRQLs.
 *
 * @param pdev the device to use
 * @param operation the operation to wait for
 *
 * @return lm_status_t LM_STATUS_SUCCESS on success,
 *         LM_STATUS_TIMEOUT if the operation did not finish in
 *         reasonable time, LM_STATUS_ABORTED if reset is in
 *         progress.
 */
static lm_status_t
lm_dmae_operation_wait(struct _lm_device_t* pdev, lm_dmae_operation_t* operation)
{
    u32_t wait_cnt = 0;
    u32_t wait_cnt_limit = 10000 * pdev->vars.clk_factor;
    lm_status_t lm_status = LM_STATUS_SUCCESS;

    while (!lm_dmae_operation_is_complete(operation))
    {
        mm_wait(pdev, 20);
        if (++wait_cnt > wait_cnt_limit)
        {
            DbgMessage(pdev,
                       FATAL,
                       "Timed-out waiting for operation %d to complete. Completion word is 0x%x expected 0x%x.\n",
                       operation->command_id,
                       operation->context->completion_word,
                       operation->context->completion_value);
            lm_status = LM_STATUS_TIMEOUT;
            break;
        }

        if (lm_reset_is_inprogress(pdev))
        {
            lm_status = LM_STATUS_ABORTED;
            break;
        }
    }

    if (LM_STATUS_SUCCESS != lm_status)
    {
        if (LM_STATUS_SUCCESS != lm_dmae_context_reset(operation->context))
        {
            DbgBreakMsg("Unable to clean up after a DMAE error. DMAE context is unusable.\n");
        }
    }

    return lm_status;
}

/**
 * @}
 */

/**
 * @defgroup DMAE_Context DMAE Context
 * @{
 */

lm_dmae_context_t* lm_dmae_context_create( struct _lm_device_t* pdev,
                                           IN const u8_t channel_idx,
                                           IN lm_dmae_locking_policy_t* locking_policy,
                                           IN const u8_t change_endianity)
{
    lm_dmae_context_t* context = NULL;
    lm_address_t context_paddr = {{0}};
    lm_address_t completion_word_paddr = {{0}};
    lm_address_t intermediate_buffer_paddr = {{0}};

    context = mm_alloc_phys_mem(pdev, sizeof(lm_dmae_context_t), &context_paddr, PHYS_MEM_TYPE_NONCACHED, LM_RESOURCE_COMMON);

    if (CHK_NULL(context))
    {
        DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
        return NULL;
    }

    context->mode = LM_DMAE_MODE_SINGLE_BLOCK;
    context->main_channel = channel_idx;
    context->executer_channel = (u8_t)-1;
    context->locking_policy = locking_policy;
    context->change_endianity = change_endianity;
    context->next_command_id = 1;

#ifndef __BIG_ENDIAN
    // if we changed the endianity, the completion word should be swapped
    context->completion_value = context->change_endianity ? DMAE_COMPLETION_VAL_SWAPPED : DMAE_COMPLETION_VAL;
#else
    context->completion_value = DMAE_COMPLETION_VAL;
#endif // !__BIG_ENDIAN

    context->completion_word = context->completion_value;

    completion_word_paddr = context_paddr;
    LM_INC64(&completion_word_paddr, OFFSETOF(lm_dmae_context_t, completion_word));
    context->completion_word_paddr = completion_word_paddr;

    intermediate_buffer_paddr = context_paddr;
    LM_INC64(&intermediate_buffer_paddr, OFFSETOF(lm_dmae_context_t, intermediate_buffer));
    context->intermediate_buffer_paddr = intermediate_buffer_paddr;

    return context;
}


lm_dmae_context_t* lm_dmae_context_create_sgl( struct _lm_device_t* pdev,
                                               IN const u8_t loader_channel_idx,
                                               IN const u8_t executer_channel_idx,
                                               IN lm_dmae_locking_policy_t* locking_policy,
                                               IN const u8_t change_endianity)
{
    lm_dmae_context_t* context = NULL;
    lm_address_t context_paddr = {{0}};
    lm_address_t completion_word_paddr = {{0}};
    lm_address_t intermediate_buffer_paddr = {{0}};

    context = mm_alloc_phys_mem(pdev, sizeof(lm_dmae_context_t), &context_paddr, PHYS_MEM_TYPE_NONCACHED, LM_RESOURCE_COMMON);

    if (CHK_NULL(context))
    {
        DbgMessage(NULL, FATAL, "Failed to allocate SGL DMAE context.\n");
        DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
        return NULL;
    }

    context->mode = LM_DMAE_MODE_SGL;
    context->main_channel = loader_channel_idx;
    context->executer_channel = executer_channel_idx;
    context->locking_policy = locking_policy;
    context->change_endianity = change_endianity;
    context->next_command_id = 1;

    context->completion_value = DMAE_SGL_COMPLETION_VAL;

    context->completion_word = context->completion_value;

    completion_word_paddr = context_paddr;
    LM_INC64(&completion_word_paddr, OFFSETOF(lm_dmae_context_t, completion_word));
    context->completion_word_paddr = completion_word_paddr;

    intermediate_buffer_paddr = context_paddr;
    LM_INC64(&intermediate_buffer_paddr, OFFSETOF(lm_dmae_context_t, intermediate_buffer));
    context->intermediate_buffer_paddr = intermediate_buffer_paddr;

    return context;
}

/**lm_dmae_context_reset
 * Bring a DMAE context to a known-good state. This function
 * must be used on an acquired context. It should be used if for
 * some reason the context is left in an invalid state (e.g. an
 * error occurred during a DMAE transaction using this context).
 *
 * @param context the context to reset.
 *
 * @return lm_status LM_STATUS_SUCCESS on success, some other
 *         failure code on failure.
 */
lm_status_t lm_dmae_context_reset(lm_dmae_context_t *context)
{
    context->completion_word = context->completion_value;

    return LM_STATUS_SUCCESS;
}

#ifdef _VBD_
__drv_maxIRQL(DISPATCH_LEVEL)
__drv_at(context->locking_policy->spinlock.irql, __drv_savesIRQL)
__drv_setsIRQL(DISPATCH_LEVEL)
#endif
lm_status_t lm_dmae_context_acquire(struct _lm_device_t* pdev, lm_dmae_context_t *context)
{
    return lm_dmae_locking_policy_lock(pdev, context->locking_policy);
}

#ifdef _VBD_
#if defined(NTDDI_WIN8)
_IRQL_requires_(DISPATCH_LEVEL)
__drv_at(context->locking_policy->spinlock.irql, __drv_restoresIRQL )
#endif
#endif
lm_status_t lm_dmae_context_release(struct _lm_device_t* pdev, lm_dmae_context_t *context)
{
    return lm_dmae_locking_policy_unlock(pdev, context->locking_policy);
}

lm_status_t lm_dmae_context_execute(struct _lm_device_t* pdev, lm_dmae_context_t *context, lm_dmae_operation_t *operation)
{
    lm_status_t lm_status = LM_STATUS_FAILURE;

    lm_status = lm_dmae_context_acquire(pdev, context);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        DbgBreakMsg("Failed to acquire context.\n");
        return lm_status;
    }

    lm_status = lm_dmae_context_execute_unsafe(pdev, context, operation);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        DbgMessage(pdev, FATAL, "lm_dmae_context_execute_unsafe returned %d\n", lm_status);
        if (LM_STATUS_ABORTED != lm_status)
        {
            //we'll let the caller decide if DbgBreak should be called when lm_reset_is_inprogress interrupts a DMAE operation.
            DbgBreakMsg("DMAE execution failed.\n");
        }

        //don't return - release the context first.
    }

    //no need to check the return code, since we can't really recover from
    //not being able to release the context anyway.
    lm_dmae_context_release(pdev, context);

    return lm_status;
}

/**lm_dmae_context_advance_command_id
 * A DMAE context keeps a 'private' counter holding the next
 * command ID to use. This function returns the next valid value
 * for this context's command ID in a thread-safe manner.
 *
 * @param context the context to change
 *
 * @return u32_t the new command ID for the context.
 */
static u32_t
lm_dmae_context_advance_command_id(lm_dmae_context_t* context)
{
    u32_t cmd_id = mm_atomic_inc(&context->next_command_id);

    if ((0 == cmd_id) ||
        (context->completion_value == cmd_id))
    {
        cmd_id = mm_atomic_inc(&context->next_command_id);
    }

    return cmd_id;
}

// Copy the loader command to DMAE - need to do it before every call - for source/dest address no reset...
// Due to parity check errors, we write zeros for the last 5 registers of the command (9-13, zero based)
static void
lm_dmae_post_command( IN struct _lm_device_t* pdev,
                      IN const u8_t idx_cmd,
                      IN const struct dmae_cmd* command )
{
    u8_t i = 0;

    DbgBreakIf(IS_VFDEV(pdev));

    if ( CHK_NULL(pdev) || CHK_NULL(command))
    {
        return;
    }

    // verify address is not NULL
    if ERR_IF( ( ( 0 == command->dst_addr_lo ) && ( command->dst_addr_hi == command->dst_addr_lo ) ) ||
               ( ( 0 == command->src_addr_lo ) && ( command->src_addr_hi == command->src_addr_lo ) ) )

    {
        DbgMessage(pdev,
                   FATAL,
                   "lm_dmae_command: idx_cmd=%d opcode = 0x%x opcode_iov=0x%x len=0x%x src=0x%x:%x dst=0x%x:%x\n",
                   idx_cmd,
                   (int)command->opcode,
                   (int)command->opcode_iov,
                   (int)command->len,
                   (int)command->src_addr_hi,
                   (int)command->src_addr_lo,
                   (int)command->dst_addr_hi,
                   (int)command->dst_addr_lo );
        DbgBreakMsg("lm_dmae_command: Trying to write/read to NULL address\n");
    }

    // Copy the command to DMAE - need to do it before every call - for source/dest address no reset...
    // Due to parity check errors, we write zeros for the last 5 registers of the command (9-13, zero based)
    for( i = 0; i < 14; i++ )
    {
        REG_WR( pdev,
                DMAE_REG_CMD_MEM+(idx_cmd*DMAE_CMD_SIZE*sizeof(u32_t))+i*sizeof(u32_t),
                i < 9 ? *(((u32_t*)command)+i) : 0 );
    }

    REG_WR(pdev, lm_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);
}


/**lm_dmae_context_execute_sgl
 * Execute an SGL operation without acquiring the
 * context.
 *
 *
 * @param pdev the device to use
 * @param context the context that executes the operation
 * @param operation the operation to execute
 *
 * @return lm_status_t LM_STATUS_SUCCESS on success,
 *         LM_STATUS_TIMEOUT if the operation did not finish in
 *         reasonable time, some other failure value on failure.
 */
lm_status_t lm_dmae_context_execute_sgl(struct _lm_device_t* pdev, lm_dmae_context_t *context, lm_dmae_operation_t *operation)
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;

    context->completion_word = operation->command_id;

    lm_dmae_post_command(pdev, context->main_channel, &operation->main_cmd);

    if (operation->b_sync)
    {
        lm_status = lm_dmae_operation_wait(pdev, operation);
        if (LM_STATUS_SUCCESS != lm_status)
        {
            DbgMessage(pdev, FATAL, "lm_dmae_operation_wait returned %d\n", lm_status);
        }
    }

    return lm_status;
}

/**lm_dmae_context_execute_sub_operation
 * lm_dmae_context_execute_single_block splits every command into
 * sub-operations, each with a length that is less than the HW
 * limit for DMAE lengths. This function executes one of these
 * sub-operations.
 * Note: This function modifies operation->main_cmd.
 *
 *
 * @param pdev the device to use
 * @param context the context that executes the operation
 * @param operation the operation to execute
 * @param src_offset the source offset of the current
 *                   sub-operation. This value overrides
 *                   whatever is stored in operation
 * @param dst_offset the destination offset of the current
 *                   sub-operation. This value overrides
 *                   whatever is stored in operation
 * @param length the length of the current sub-operation. This
 *               value overrides whatever is stored in operation
 *
 * @return lm_status_t LM_STATUS_SUCCESS on success,
 *         LM_STATUS_TIMEOUT if the operation did not finish in
 *         reasonable time, some other failure value on failure.
 */
static lm_status_t
lm_dmae_context_execute_sub_operation( struct _lm_device_t* pdev,
                                       lm_dmae_context_t *context,
                                       lm_dmae_operation_t *operation,
                                       IN const u64_t src_offset,
                                       IN const u64_t dst_offset,
                                       IN const u16_t length)
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;

    lm_address_t src_addr = {{0}};
    lm_address_t dst_addr = {{0}};

    u16_t i = 0;

    src_addr.as_u64 = src_offset;
    dst_addr.as_u64 = dst_offset;

    switch (operation->blocks[0].source.type)
    {
    case LM_DMAE_ADDRESS_GRC: //fallthrough
    case LM_DMAE_ADDRESS_HOST_PHYS:
        {
            operation->main_cmd.src_addr_hi = src_addr.as_u32.high;
            operation->main_cmd.src_addr_lo = src_addr.as_u32.low;
        }
        break;
    case LM_DMAE_ADDRESS_HOST_VIRT: //for virtual source addresses we use the intermediate buffer.
        {
            operation->main_cmd.src_addr_hi = context->intermediate_buffer_paddr.as_u32.high;
            operation->main_cmd.src_addr_lo = context->intermediate_buffer_paddr.as_u32.low;

            mm_memcpy( &context->intermediate_buffer[0], src_addr.as_ptr, length*sizeof(u32_t));
            if (operation->le32_swap)
            {
                for (i = 0; i < length; ++i)
                {
                    context->intermediate_buffer[i] = mm_cpu_to_le32(context->intermediate_buffer[i]);
                }
            }
        }
        break;
    default:
        {
            DbgBreakMsg("Unknown source address type for DMAE operation.\n");
            return LM_STATUS_INVALID_PARAMETER;
        }
        break;
    }

    switch (operation->blocks[0].dest.type)
    {
    case LM_DMAE_ADDRESS_GRC: //fallthrough
    case LM_DMAE_ADDRESS_HOST_PHYS:
        {
            operation->main_cmd.dst_addr_hi = dst_addr.as_u32.high;
            operation->main_cmd.dst_addr_lo = dst_addr.as_u32.low;
        }
        break;
    case LM_DMAE_ADDRESS_HOST_VIRT: //for virtual destination addresses we use the intermediate buffer.
        {
            operation->main_cmd.dst_addr_hi = context->intermediate_buffer_paddr.as_u32.high;
            operation->main_cmd.dst_addr_lo = context->intermediate_buffer_paddr.as_u32.low;
        }
        break;
    default:
        {
            DbgBreakMsg("Unknown destination address type for DMAE operation.\n");
            return LM_STATUS_INVALID_PARAMETER;
        }
        break;
    }

    DbgBreakIf(context->completion_word != context->completion_value);

    context->completion_word = operation->command_id;

    operation->main_cmd.len = length;

    lm_dmae_post_command(pdev, context->main_channel, &operation->main_cmd);

    lm_status = lm_dmae_operation_wait(pdev, operation);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        return lm_status;
    }

    DbgBreakIf(context->completion_word != context->completion_value);

    if (operation->blocks[0].dest.type == LM_DMAE_ADDRESS_HOST_VIRT)
    {
        mm_memcpy( dst_addr.as_ptr, &context->intermediate_buffer[0], length*sizeof(u32_t));
    }

    return lm_status;
}

/**lm_dmae_context_execute_single_block
 * Execute a single-block operation without acquiring the
 * context.
 *
 *
 * @param pdev the device to use
 * @param context the context that executes the operation
 * @param operation the operation to execute
 *
 * @return lm_status_t LM_STATUS_SUCCESS on success,
 *         LM_STATUS_TIMEOUT if the operation did not finish in
 *         reasonable time, some other failure value on failure.
 */
static lm_status_t
lm_dmae_context_execute_single_block(struct _lm_device_t* pdev, lm_dmae_context_t *context, lm_dmae_operation_t *operation)
{
    lm_status_t lm_status = LM_STATUS_SUCCESS;
    u16_t length_current = 0;
    u16_t i = 0;
    u32_t offset = 0;
    lm_address_t src_addr = {{0}};
    lm_address_t dst_addr = {{0}};
    u64_t src_addr_split = 0;
    u64_t dst_addr_split = 0;


    const u16_t length_limit = (operation->blocks[0].dest.type != LM_DMAE_ADDRESS_GRC) ? min( DMAE_MAX_READ_SIZE, DMAE_MAX_RW_SIZE(pdev) ) : DMAE_MAX_RW_SIZE(pdev);
    u16_t cnt_split = 0; // number of chunks of splits
    u16_t length_mod = 0;

    DbgBreakIf(0 == length_limit); //to avoid divide-by-0. can't do static assert because it depends on CHIP_ID

    cnt_split = operation->blocks[0].length / length_limit;
    length_mod = operation->blocks[0].length % length_limit;

    src_addr.as_u64 = lm_dmae_address_native_offset(&operation->blocks[0].source);
    src_addr_split = src_addr.as_u64;

    dst_addr.as_u64 = lm_dmae_address_native_offset(&operation->blocks[0].dest);
    dst_addr_split = dst_addr.as_u64;


    DbgBreakIf(IS_VFDEV(pdev));

    if ( CHK_NULL(pdev) || ERR_IF( 0 == operation->blocks[0].length ) )
    {
        return LM_STATUS_INVALID_PARAMETER;
    }

    for( i = 0; i <= cnt_split; i++ )
    {
        offset = length_limit*i;

        if( !operation->b_replicate_source )
        {
            if (operation->blocks[0].source.type == LM_DMAE_ADDRESS_GRC)
            {
                src_addr_split = src_addr.as_u64 + offset;
            }
            else
            {
                src_addr_split = src_addr.as_u64 + (offset*4);
            }
        }

        if (operation->blocks[0].dest.type == LM_DMAE_ADDRESS_GRC)
        {
            dst_addr_split = dst_addr.as_u64 + offset;
        }
        else
        {
            dst_addr_split = dst_addr.as_u64 + (offset*4);
        }

        length_current = (cnt_split == i) ? length_mod : length_limit;

        // might be zero on last iteration
        if( 0 != length_current )
        {
            lm_status = lm_dmae_context_execute_sub_operation(pdev, context, operation, src_addr_split, dst_addr_split, length_current);
            if( LM_STATUS_SUCCESS != lm_status )
            {
                return lm_status;
            }
        }
    }

    return lm_status;
}
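
/*
 * A worked example of the split arithmetic above (the limit value is
 * illustrative, not the actual value of DMAE_MAX_READ_SIZE): with a
 * host-memory destination and length_limit == 128 dwords, a 200-dword block
 * yields cnt_split = 200 / 128 = 1 and length_mod = 200 % 128 = 72, i.e. one
 * full 128-dword sub-operation followed by a 72-dword one. Between
 * sub-operations, GRC offsets advance by 'offset' registers while host
 * addresses advance by 'offset*4' bytes, since GRC offsets count dwords.
 */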

lm_status_t lm_dmae_context_execute_unsafe(struct _lm_device_t* pdev, lm_dmae_context_t *context, lm_dmae_operation_t *operation)
{
    lm_status_t lm_status = LM_STATUS_FAILURE;

    if (context->completion_word != context->completion_value)
    {
        return LM_STATUS_BUSY;
    }

    DbgBreakIf(context->mode != operation->mode);

    operation->command_id = lm_dmae_context_advance_command_id(context);

    switch (context->mode)
    {
    case LM_DMAE_MODE_SINGLE_BLOCK:
        {
            lm_status = lm_dmae_context_execute_single_block(pdev, context, operation);
        }
        break;
    case LM_DMAE_MODE_SGL:
        {
            lm_status = lm_dmae_context_execute_sgl(pdev, context, operation);
        }
        break;
    default:
        {
            DbgBreakMsg("Unknown context mode.\n");
            lm_status = LM_STATUS_INVALID_PARAMETER;
        }
        break;
    }

    return lm_status;
}

/**
 * @}
 */


/**
 * @defgroup DMAE_Address DMAE address
 * @{
 */

u64_t lm_dmae_address_native_offset(IN const lm_dmae_address_t* address)
{
    switch (address->type)
    {
    case LM_DMAE_ADDRESS_GRC:
        {
            return address->u.grc_offset / sizeof(u32_t);
        }
        break;
    case LM_DMAE_ADDRESS_HOST_PHYS:
        {
            return address->u.host_phys_address.as_u64;
        }
        break;
    case LM_DMAE_ADDRESS_HOST_VIRT:
        {
            lm_address_t temp;
            temp.as_ptr = address->u.host_virt_address;
            return temp.as_u64;
        }
        break;
    default:
        {
            DbgBreakMsg("Unknown address type.\n");
            return 0;
        }
        break;
    }
}

lm_dmae_address_t lm_dmae_address(IN const u64_t offset, IN const lm_dmae_address_type_t type)
{
    lm_dmae_address_t address = {{0}};

    address.type = type;

    switch (type)
    {
    case LM_DMAE_ADDRESS_GRC:
        {
            ASSERT_STATIC(sizeof(address.u.grc_offset) == sizeof(u32_t));
            DbgBreakIf(offset > MAX_VARIABLE_VALUE(address.u.grc_offset));

            address.u.grc_offset = (u32_t)offset;
        }
        break;
    case LM_DMAE_ADDRESS_HOST_PHYS:
        {
            address.u.host_phys_address.as_u64 = offset;
        }
        break;
    case LM_DMAE_ADDRESS_HOST_VIRT:
        {
            lm_address_t temp;
            temp.as_u64 = offset;

            address.u.host_virt_address = temp.as_ptr;
        }
        break;
    default:
        {
            DbgBreakMsg("Unknown address type.\n");
        }
        break;
    }

    return address;
}
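
/*
 * A short sketch of constructing each address flavor and reading back its
 * native offset (offsets are illustrative; 'buffer' and 'paddr' are
 * hypothetical placeholders):
 *
 * @code
 * lm_dmae_address_t grc  = lm_dmae_address(0x1000, LM_DMAE_ADDRESS_GRC);
 * lm_dmae_address_t phys = lm_dmae_address(paddr.as_u64, LM_DMAE_ADDRESS_HOST_PHYS);
 *
 * lm_address_t va;
 * va.as_ptr = buffer;
 * lm_dmae_address_t virt = lm_dmae_address(va.as_u64, LM_DMAE_ADDRESS_HOST_VIRT);
 *
 * // GRC offsets come back as dword indices, host addresses as-is:
 * u64_t grc_dwords = lm_dmae_address_native_offset(&grc); // 0x1000/4 == 0x400
 * @endcode
 */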

/**
 * @}
 */


/**
 * @defgroup DMAE_User DMAE users API
 * @{
 */

lm_dmae_context_info_t* lm_dmae_get(struct _lm_device_t* pdev, IN const lm_dmae_type_t type)
{
    ASSERT_STATIC(LM_DMAE_MAX_TYPE == ARRSIZE(pdev->dmae_info.ctx_arr));

    if (type >= LM_DMAE_MAX_TYPE)
    {
        DbgBreakMsg("Invalid DMAE user index.\n");
        return NULL;
    }

    return &pdev->dmae_info.ctx_arr[type];
}

static const u32_t MAX_GRC_OFFSET = 0x00400000; //GRC space is 4MB for 57710-578xx

lm_status_t lm_dmae_reg_wr(struct _lm_device_t* pdev, lm_dmae_context_t* context, void* source_vaddr, u32_t dest_offset, u16_t length, u8_t replicate_source, u8_t le32_swap)
{
    lm_address_t source_offset = {{0}};
    lm_dmae_address_t source = {{0}};
    lm_dmae_address_t dest = lm_dmae_address(dest_offset, LM_DMAE_ADDRESS_GRC);
    lm_dmae_operation_t operation = {0};
    lm_status_t lm_status = LM_STATUS_FAILURE;

    DbgBreakIf(dest_offset > MAX_GRC_OFFSET); //make sure dest_offset is a valid GRC offset

    source_offset.as_ptr = source_vaddr;
    source = lm_dmae_address(source_offset.as_u64, LM_DMAE_ADDRESS_HOST_VIRT);

    lm_status = lm_dmae_operation_create(pdev, source, dest, length, replicate_source, le32_swap, context, &operation);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        return lm_status;
    }

    lm_status = lm_dmae_context_execute(pdev, context, &operation);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        return lm_status;
    }

    return lm_status;
}

lm_status_t lm_dmae_reg_wr_phys(struct _lm_device_t* pdev, lm_dmae_context_t* context, lm_address_t source_paddr, u32_t dest_offset, u16_t length)
{
    lm_dmae_address_t source = lm_dmae_address(source_paddr.as_u64, LM_DMAE_ADDRESS_HOST_PHYS);
    lm_dmae_address_t dest = lm_dmae_address(dest_offset, LM_DMAE_ADDRESS_GRC);
    lm_dmae_operation_t operation = {0};
    lm_status_t lm_status = LM_STATUS_FAILURE;

    DbgBreakIf(dest_offset > MAX_GRC_OFFSET); //make sure dest_offset is a valid GRC offset

    lm_status = lm_dmae_operation_create(pdev, source, dest, length, FALSE, FALSE, context, &operation);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        return lm_status;
    }

    lm_status = lm_dmae_context_execute(pdev, context, &operation);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        return lm_status;
    }

    return lm_status;
}

lm_status_t lm_dmae_reg_rd(struct _lm_device_t* pdev, lm_dmae_context_t* context, u32_t source_offset, void* dest_vaddr, u16_t length, u8_t le32_swap)
{
    lm_address_t dest_offset = {{0}};
    lm_dmae_address_t source = lm_dmae_address(source_offset, LM_DMAE_ADDRESS_GRC);
    lm_dmae_address_t dest = {{0}};
    lm_dmae_operation_t operation = {0};
    lm_status_t lm_status = LM_STATUS_FAILURE;

    DbgBreakIf(source_offset > MAX_GRC_OFFSET); //make sure source_offset is a valid GRC offset

    dest_offset.as_ptr = dest_vaddr;
    dest = lm_dmae_address(dest_offset.as_u64, LM_DMAE_ADDRESS_HOST_VIRT);

    lm_status = lm_dmae_operation_create(pdev, source, dest, length, FALSE, le32_swap, context, &operation);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        return lm_status;
    }

    lm_status = lm_dmae_context_execute(pdev, context, &operation);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        return lm_status;
    }

    return lm_status;
}
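
/*
 * A minimal sketch of reading a block of registers through DMAE
 * (illustration only: the LM_DMAE_DEFAULT enumerator, the 'context' field of
 * lm_dmae_context_info_t and 'grc_offset' are assumptions here, not verified
 * against lm5710.h):
 *
 * @code
 * u32_t buf[8] = {0};
 * lm_dmae_context_info_t* info = lm_dmae_get(pdev, LM_DMAE_DEFAULT);
 *
 * if (NULL != info)
 * {
 *     lm_dmae_reg_rd(pdev, info->context, grc_offset, buf, ARRSIZE(buf), FALSE);
 * }
 * @endcode
 */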

lm_status_t lm_dmae_copy_phys_buffer_unsafe(struct _lm_device_t* pdev, lm_dmae_context_t* context, lm_address_t source_paddr, lm_address_t dest_paddr, u16_t length)
{
    lm_dmae_address_t source = lm_dmae_address(source_paddr.as_u64, LM_DMAE_ADDRESS_HOST_PHYS);
    lm_dmae_address_t dest = lm_dmae_address(dest_paddr.as_u64, LM_DMAE_ADDRESS_HOST_PHYS);
    lm_dmae_operation_t operation = {0};
    lm_status_t lm_status = LM_STATUS_FAILURE;

    lm_status = lm_dmae_operation_create(pdev, source, dest, length, FALSE, FALSE, context, &operation);
    if (LM_STATUS_SUCCESS != lm_status)
    {
        return lm_status;
    }

    lm_status = lm_dmae_context_execute_unsafe(pdev, context, &operation);
    if (LM_STATUS_ABORTED == lm_status)
    {
        //if the operation failed due to lm_reset_is_inprogress, treat it as success.
        lm_status = LM_STATUS_SUCCESS;
    }

    if (LM_STATUS_SUCCESS != lm_status)
    {
        return lm_status;
    }

    return lm_status;
}


/**
 * @}
 */