#ifndef __COMMAND_H__
#define __COMMAND_H__

/* This file contains the slow-path send queue and the L2 Command code */
#include "57712_reg.h"
#include "577xx_int_offsets.h"
#include "context.h"
#include "lm5710.h"
//#include "5710_hsi.h"
//#define MAX_PROTO 2

/* How many slow-path-queue elements can be sent in parallel, divided into normal and high priority */
#define MAX_NORMAL_PRIORITY_SPE 7
#define MAX_HIGH_PRIORITY_SPE   1
#define MAX_NUM_SPE 8

#define CMD_PRIORITY_NORMAL 0x10
#define CMD_PRIORITY_MEDIUM 0x20
#define CMD_PRIORITY_HIGH   0x30


/* structure representing a list of slow-path-completions */
typedef struct _sp_cqes_info {
    union eth_rx_cqe sp_cqe[MAX_NUM_SPE];
    u8_t idx;
} sp_cqes_info;

static __inline void _lm_sq_post(struct _lm_device_t *pdev, struct sq_pending_command *pending)
{
    u32_t func = FUNC_ID(pdev);

    /* TODO replace this with the proper struct */
    /* CID needs port number to be encoded in it */
    mm_memcpy(pdev->sq_info.sq_chain.prod_bd, &pending->command, sizeof(pending->command));

    pdev->sq_info.sq_chain.prod_idx++;
    pdev->sq_info.sq_chain.bd_left--;

    if (pdev->sq_info.sq_chain.prod_bd == pdev->sq_info.sq_chain.last_bd) {
        pdev->sq_info.sq_chain.prod_bd = pdev->sq_info.sq_chain.sq_chain_virt;
    } else {
        pdev->sq_info.sq_chain.prod_bd++;
    }

    DbgMessage(pdev, VERBOSEl2sp | VERBOSEl4sp, "Writing SP prod %d, conn_and_cmd_data=%x, type=%d\n", pdev->sq_info.sq_chain.prod_idx, pending->command.hdr.conn_and_cmd_data, pending->command.hdr.type);

    if (IS_PFDEV(pdev) && pdev->sq_info.sq_state == SQ_STATE_NORMAL) {
        LM_INTMEM_WRITE16(pdev, XSTORM_SPQ_PROD_OFFSET(func), pdev->sq_info.sq_chain.prod_idx, BAR_XSTRORM_INTMEM);
    }
#ifdef VF_INVOLVED
    else {
        LM_INTMEM_WRITE16(PFDEV(pdev), XSTORM_VF_SPQ_PROD_OFFSET(ABS_VFID(pdev)), pdev->sq_info.sq_chain.prod_idx, BAR_XSTRORM_INTMEM);
    }
#endif
}

/**
 * @Description: Fills the command entry received as a parameter,
 *             based on the inputs below.
 *
 * @param pdev
 * @param pending - OUT: this entry is filled given the inputs
 *                below
 * @param cid
 * @param command - FW Command ID
 * @param type   - The type of connection; can optionally
 *               include the function id as well if it differs
 *               from the function of pdev (for example for VFs)
 *
 * @param data - Data for FW ramrod
 * @param release_mem_flag - Determines whether the sp pending
 *                         command will be returned to the pool
 *                         at the end of usage.
 */
static __inline void lm_sq_post_fill_entry(struct _lm_device_t       * pdev,
                                           struct sq_pending_command * pending,
                                           u32_t                cid,
                                           u8_t                 command,
                                           u16_t                type,
                                           u64_t                data,
                                           u8_t                 release_mem_flag)
{
    /* In some cases type may already contain the func-id (VF specifically), so we add it only if it's not there... */
    if (!(type & SPE_HDR_T_FUNCTION_ID))
    {
        type |= (FUNC_ID(pdev) << SPE_HDR_T_FUNCTION_ID_SHIFT);
    }

    // CID MSB is function number
    pending->command.hdr.conn_and_cmd_data = mm_cpu_to_le32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(pdev, cid));
    pending->command.hdr.type = mm_cpu_to_le16(type);
    pending->command.protocol_data.hi = mm_cpu_to_le32(U64_HI(data));
    pending->command.protocol_data.lo = mm_cpu_to_le32(U64_LO(data));
    pending->flags = 0;

    if (release_mem_flag)
    {
        SET_FLAGS(pending->flags, SQ_PEND_RELEASE_MEM);
    }

    pending->cid  = cid;
    pending->type = type; /* don't mask out the function ID (& SPE_HDR_T_CONN_TYPE) - the RSC VF update really uses the full value */
    pending->cmd  = command;
}
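
/*
 * Usage sketch (illustrative only, not part of the original header):
 * preparing a pending entry before queueing it. 'my_cid', 'my_cmd',
 * 'my_type' and 'ramrod_phys_data' are hypothetical placeholders for a
 * real CID, FW command ID, connection type and ramrod data address.
 */
#if 0
static void example_fill_entry(struct _lm_device_t       * pdev,
                               struct sq_pending_command * pending,
                               u32_t my_cid, u8_t my_cmd, u16_t my_type,
                               u64_t ramrod_phys_data)
{
    /* Last argument non-zero: SQ_PEND_RELEASE_MEM is set so the entry is
     * returned to the pool once the ramrod completes. */
    lm_sq_post_fill_entry(pdev, pending, my_cid, my_cmd, my_type,
                          ramrod_phys_data, 1);
}
#endif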

/**
 * Description
 *	Add the entry to the pending SP list.
 *	Try to post entries from the list to the sq_chain if possible (i.e., fewer than 8 ramrod commands are pending).
 *
 * @param pdev
 * @param pending  - The pending list entry.
 * @param priority - (high or normal) to which list to insert the pending list entry.
 *
 * @return lm_status_t: LM_STATUS_SUCCESS on success or
 *         LM_STATUS_REQUEST_NOT_ACCEPTED if slowpath queue is
 *         in blocked state.
 */
lm_status_t lm_sq_post_entry(struct _lm_device_t       * pdev,
                             struct sq_pending_command * pending,
                             u8_t                        priority);
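
/*
 * Illustrative sketch: posting an entry filled by lm_sq_post_fill_entry()
 * above and checking for a blocked queue. Recovery policy is left to the
 * caller.
 */
#if 0
static lm_status_t example_post_entry(struct _lm_device_t       * pdev,
                                      struct sq_pending_command * pending)
{
    lm_status_t lm_status = lm_sq_post_entry(pdev, pending, CMD_PRIORITY_NORMAL);

    if (lm_status == LM_STATUS_REQUEST_NOT_ACCEPTED)
    {
        /* slowpath queue is blocked - the request was not queued */
    }
    return lm_status;
}
#endif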

/*
    post a ramrod to the sq
    takes the sq pending list spinlock and adds the request
    will not block
    but the actual posting to the sq might be deferred until there is room
    MUST only have one request pending per CID (this is up to the caller to enforce)
*/
lm_status_t lm_sq_post(struct _lm_device_t *pdev,
                       u32_t                cid,
                       u8_t                 command,
                       u8_t                 priority,
                       u16_t                type,
                       u64_t                data);
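
/*
 * Illustrative sketch: posting a ramrod by value. MY_RAMROD_CMD and
 * MY_CONN_TYPE are hypothetical placeholders for constants from the FW
 * HSI headers; lm_sq_post() builds the pending entry internally.
 */
#if 0
static lm_status_t example_sq_post(struct _lm_device_t *pdev, u32_t cid, u64_t phys_data)
{
    /* only one request may be pending per CID - enforced by the caller */
    return lm_sq_post(pdev, cid, MY_RAMROD_CMD, CMD_PRIORITY_NORMAL,
                      MY_CONN_TYPE, phys_data);
}
#endif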

/**
 * @Description
 *      Informs the sq mechanism of completed ramrods. Because the
 *      completions arrive on the fast-path rings, the fast-path
 *      needs to inform the sq that the ramrod has been serviced.
 *      Will not block. It also needs to note which ramrod has
 *      been completed, since completions can arrive in a different
 *      order than they were sent.
 * @param pdev
 * @param priority: priority of ramrod being completed
 *                (different credits)
 * @param command:  which command is completed
 * @param type:     connection type
 * @param cid:      connection id that ramrod was sent with
 */
void lm_sq_complete(struct _lm_device_t *pdev, u8_t priority,
                    u8_t command, u16_t type, u32_t cid);
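
/*
 * Illustrative sketch: a fast-path handler acknowledging a ramrod
 * completion so the sq credit for that priority is returned. In real
 * code cmd/type/cid are parsed out of the received CQE.
 */
#if 0
static void example_on_ramrod_cqe(struct _lm_device_t *pdev,
                                  u8_t cmd, u16_t type, u32_t cid)
{
    lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, cmd, type, cid);
}
#endif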

/**
 * @description
 *    Do any deferred posting pending on the sq. Takes the list spinlock,
 *    will not block. Checks the sq state; if it's pending (meaning there is
 *    no hw...), calls flush at the end, which takes care of completing these
 *    completions internally.
 * @param pdev
 *
 * @return lm_status_t SUCCESS if no pending requests were sent.
 *                     PENDING if a pending request was sent.
 */
lm_status_t lm_sq_post_pending(struct _lm_device_t *pdev);
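
/*
 * Illustrative sketch: after credit is freed (e.g. by lm_sq_complete()
 * above), drain the deferred list and interpret the return status.
 */
#if 0
static void example_drain_deferred(struct _lm_device_t *pdev)
{
    lm_status_t lm_status = lm_sq_post_pending(pdev);

    if (lm_status == LM_STATUS_PENDING)
    {
        /* at least one deferred request was posted to the chain */
    }
    /* LM_STATUS_SUCCESS: nothing was waiting to be posted */
}
#endif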

/*
    post a slow-path command
    takes a spinlock, does not sleep
    actual command posting may be delayed
*/
static __inline lm_status_t lm_command_post( struct _lm_device_t* pdev,
                                   u32_t                cid,
                                   u8_t                 command,
                                   u8_t                 priority,
                                   u16_t                type,
                                   u64_t                data )
{
    return lm_sq_post(pdev, cid, command, priority, type, data);
}


/* TODO: move functions above to lm_sp.c */
/**
 * @Description
 *      change state of slowpath queue.
 *
 * @param pdev
 * @param state NORMAL, PENDING, BLOCKED
 */
void lm_sq_change_state(struct _lm_device_t *pdev, lm_sq_state_t state);
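
/*
 * Illustrative sketch: blocking the queue around a flow where no posts
 * should reach the hw, then restoring it. SQ_STATE_BLOCKED is assumed to
 * follow the naming of SQ_STATE_NORMAL used in _lm_sq_post() above.
 */
#if 0
static void example_block_unblock(struct _lm_device_t *pdev)
{
    lm_sq_change_state(pdev, SQ_STATE_BLOCKED); /* new posts are refused */
    /* recovery flow runs here */
    lm_sq_change_state(pdev, SQ_STATE_NORMAL);  /* posts reach hw again */
}
#endif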

/**
 * @Description
 *      This function completes any pending slowpath requests.
 *      It does this as if they were completed via cookie...
 *      It needs to know all the possible cookies and which
 *      completions to give. Any new ramrod should be added to
 *      this function, along with whether it should be ignored.
 *
 * @param pdev
 */
void lm_sq_complete_pending_requests(struct _lm_device_t *pdev);

/**
 * @Description
 *      This function takes care of registering a DPC for
 *      completing slowpaths internally in the driver (if any
 *      exist).
 * @param pdev
 *
 * @return lm_status_t SUCCESS: if all flushed (i.e. dpc not
 *                              scheduled)
 *                     PENDING: if dpc is scheduled
 */
lm_status_t lm_sq_flush(struct _lm_device_t *pdev);
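
/*
 * Illustrative sketch: flushing when the hw can no longer complete
 * ramrods. SQ_STATE_PENDING is assumed from the state list documented
 * at lm_sq_change_state() above.
 */
#if 0
static void example_flush(struct _lm_device_t *pdev)
{
    /* no hw to complete ramrods - complete them internally instead */
    lm_sq_change_state(pdev, SQ_STATE_PENDING);

    if (lm_sq_flush(pdev) == LM_STATUS_PENDING)
    {
        /* a DPC was scheduled; completions will be generated from it */
    }
    /* LM_STATUS_SUCCESS: everything was already flushed */
}
#endif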

/**
 * @Description
 *      Checks if the sq is empty
 *
 * @param pdev
 *
 * @return u8_t TRUE if empty, FALSE otherwise
 */
u8_t lm_sq_is_empty(struct _lm_device_t *pdev);
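
/*
 * Illustrative sketch: draining before unload by polling until the sq
 * is empty. The wait primitive is platform-specific and omitted here.
 */
#if 0
static void example_wait_sq_empty(struct _lm_device_t *pdev)
{
    while (!lm_sq_is_empty(pdev))
    {
        /* platform-specific delay / yield goes here */
    }
}
#endif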

lm_status_t lm_sq_comp_cb_register(struct _lm_device_t *pdev, u8_t type, lm_sq_comp_cb_t cb);

lm_status_t lm_sq_comp_cb_deregister(struct _lm_device_t *pdev, u8_t type);
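
/*
 * Illustrative sketch: registering a completion callback for a given
 * connection type. The callback prototype shown is an assumption about
 * lm_sq_comp_cb_t; the real typedef lives elsewhere in this driver.
 */
#if 0
static void example_sq_comp_cb(struct _lm_device_t *pdev,
                               struct sq_pending_command *pending)
{
    /* account for / release 'pending' here */
}

static void example_register_cb(struct _lm_device_t *pdev, u8_t conn_type)
{
    lm_sq_comp_cb_register(pdev, conn_type, example_sq_comp_cb);
    /* ... on teardown: */
    lm_sq_comp_cb_deregister(pdev, conn_type);
}
#endif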

#endif //__COMMAND_H__