/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/*
 * Support functions for managing command queues used for
 * various hardware blocks.
 */

#include <linux/kernel.h>

#include <asm/octeon/octeon.h>

#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-cmd-queue.h>

#include <asm/octeon/cvmx-npei-defs.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-pko-defs.h>

/*
 * All users of the command queues access the global queue state
 * through this pointer. It points to a bootmem named block.
 */
__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
EXPORT_SYMBOL_GPL(__cvmx_cmd_queue_state_ptr);

/*
 * Initialize the global queue state pointer.
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
{
	char *alloc_name = "cvmx_cmd_queues";
	extern uint64_t octeon_reserve32_memory;

	if (likely(__cvmx_cmd_queue_state_ptr))
		return CVMX_CMD_QUEUE_SUCCESS;

	if (octeon_reserve32_memory)
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
						   octeon_reserve32_memory,
						   octeon_reserve32_memory +
						   (CONFIG_CAVIUM_RESERVE32 <<
						    20) - 1, 128, alloc_name);
	else
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
					    128,
					    alloc_name);
	if (__cvmx_cmd_queue_state_ptr)
		memset(__cvmx_cmd_queue_state_ptr, 0,
		       sizeof(*__cvmx_cmd_queue_state_ptr));
	else {
		struct cvmx_bootmem_named_block_desc *block_desc =
		    cvmx_bootmem_find_named_block(alloc_name);
		if (block_desc)
			__cvmx_cmd_queue_state_ptr =
			    cvmx_phys_to_ptr(block_desc->base_addr);
		else {
			cvmx_dprintf
			    ("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
			     alloc_name);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
	}
	return CVMX_CMD_QUEUE_SUCCESS;
}
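
/*
 * Because the state lives in a bootmem named block, a second consumer
 * can attach to the same state instead of allocating its own. A
 * minimal sketch (illustrative only), mirroring the fallback path
 * above:
 *
 *	struct cvmx_bootmem_named_block_desc *desc =
 *		cvmx_bootmem_find_named_block("cvmx_cmd_queues");
 *	__cvmx_cmd_queue_all_state_t *state = NULL;
 *
 *	if (desc)
 *		state = cvmx_phys_to_ptr(desc->base_addr);
 */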

/*
 * Initialize a command queue for use. The initial FPA buffer is
 * allocated and the hardware unit is configured to point to the
 * new command queue.
 *
 * @queue_id:  Hardware command queue to initialize.
 * @max_depth: Maximum outstanding commands that can be queued.
 * @fpa_pool:  FPA pool the command queues should come from.
 * @pool_size: Size of each buffer in the FPA pool (bytes)
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
						  int max_depth, int fpa_pool,
						  int pool_size)
{
	__cvmx_cmd_queue_state_t *qstate;
	cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
	if (result != CVMX_CMD_QUEUE_SUCCESS)
		return result;

	qstate = __cvmx_cmd_queue_get_state(queue_id);
	if (qstate == NULL)
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/*
	 * We artificially limit max_depth to 1<<20 words; the limit
	 * is arbitrary.
	 */
	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
		if ((max_depth < 0) || (max_depth > 1 << 20))
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	} else if (max_depth != 0)
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	if ((fpa_pool < 0) || (fpa_pool > 7))
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	if ((pool_size < 128) || (pool_size > 65536))
		return CVMX_CMD_QUEUE_INVALID_PARAM;

	/* See if someone else has already initialized the queue */
	if (qstate->base_ptr_div128) {
		if (max_depth != (int)qstate->max_depth) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				"Queue already initialized with different "
				"max_depth (%d).\n",
			     (int)qstate->max_depth);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if (fpa_pool != qstate->fpa_pool) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				"Queue already initialized with different "
				"FPA pool (%u).\n",
			     qstate->fpa_pool);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				"Queue already initialized with different "
				"FPA pool size (%u).\n",
			     (qstate->pool_size_m1 + 1) << 3);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_ALREADY_SETUP;
	} else {
		union cvmx_fpa_ctl_status status;
		void *buffer;

		status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
		if (!status.s.enb) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "FPA is not enabled.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		buffer = cvmx_fpa_alloc(fpa_pool);
		if (buffer == NULL) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Unable to allocate initial buffer.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}

		memset(qstate, 0, sizeof(*qstate));
		qstate->max_depth = max_depth;
		qstate->fpa_pool = fpa_pool;
		qstate->pool_size_m1 = (pool_size >> 3) - 1;
		qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
		/*
		 * We zeroed the now_serving field, so we also need to
		 * zero the ticket.
		 */
		__cvmx_cmd_queue_state_ptr->
		    ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_SUCCESS;
	}
}
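
/*
 * A typical call, sketched with hypothetical parameters: bring up PKO
 * queue 0 with no depth limit (max_depth must be 0 unless
 * CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH is set) and command buffers drawn
 * from the standard output buffer pool:
 *
 *	cvmx_cmd_queue_result_t r;
 *
 *	r = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO(0), 0,
 *				      CVMX_FPA_OUTPUT_BUFFER_POOL,
 *				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE);
 *	if (r != CVMX_CMD_QUEUE_SUCCESS && r != CVMX_CMD_QUEUE_ALREADY_SETUP)
 *		cvmx_dprintf("PKO queue init failed: %d\n", (int)r);
 *
 * CVMX_CMD_QUEUE_ALREADY_SETUP is not an error when another core has
 * already initialized the queue with identical parameters.
 */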

/*
 * Shut down a queue and free its command buffers to the FPA. The
 * hardware connected to the queue must be stopped before this
 * function is called.
 *
 * @queue_id: Queue to shut down
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
	if (qptr == NULL) {
		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
			     "get queue information.\n");
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	}

	if (cvmx_cmd_queue_length(queue_id) > 0) {
		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
			     "has data in it.\n");
		return CVMX_CMD_QUEUE_FULL;
	}

	__cvmx_cmd_queue_lock(queue_id, qptr);
	if (qptr->base_ptr_div128) {
		cvmx_fpa_free(cvmx_phys_to_ptr
			      ((uint64_t) qptr->base_ptr_div128 << 7),
			      qptr->fpa_pool, 0);
		qptr->base_ptr_div128 = 0;
	}
	__cvmx_cmd_queue_unlock(qptr);

	return CVMX_CMD_QUEUE_SUCCESS;
}
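
/*
 * An illustrative teardown sequence (a sketch, not a fixed recipe):
 * after quiescing the hardware, the caller drains the queue and then
 * shuts it down. The queue id is hypothetical:
 *
 *	while (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_PKO(0)) > 0)
 *		;	 (wait for hardware to consume pending words)
 *	cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(0));
 *
 * Calling this while commands are still pending fails with
 * CVMX_CMD_QUEUE_FULL rather than freeing an in-use buffer.
 */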

/*
 * Return the number of command words pending in the queue. This
 * function may be relatively slow for some hardware units.
 *
 * @queue_id: Hardware command queue to query
 *
 * Returns Number of outstanding commands
 */
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
{
	if (CVMX_ENABLE_PARAMETER_CHECKING) {
		if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	}

	/*
	 * The cast is here so gcc will check that all values in the
	 * cvmx_cmd_queue_id_t enumeration are handled.
	 */
	switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
	case CVMX_CMD_QUEUE_PKO_BASE:
		/*
		 * FIXME: Need atomic lock on
		 * CVMX_PKO_REG_READ_IDX. Right now we are normally
		 * called with the queue lock, so that is a SLIGHT
		 * amount of protection.
		 */
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
			union cvmx_pko_mem_debug9 debug9;
			debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
			return debug9.cn38xx.doorbell;
		} else {
			union cvmx_pko_mem_debug8 debug8;
			debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
			return debug8.cn50xx.doorbell;
		}
	case CVMX_CMD_QUEUE_ZIP:
	case CVMX_CMD_QUEUE_DFA:
	case CVMX_CMD_QUEUE_RAID:
		/* FIXME: Implement other lengths */
		return 0;
	case CVMX_CMD_QUEUE_DMA_BASE:
		{
			union cvmx_npei_dmax_counts dmax_counts;
			dmax_counts.u64 =
			    cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS
					  (queue_id & 0x7));
			return dmax_counts.s.dbell;
		}
	case CVMX_CMD_QUEUE_END:
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	}
	return CVMX_CMD_QUEUE_INVALID_PARAM;
}
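
/*
 * Note the mixed return convention: a non-negative value is the
 * pending word count, while a negative value is a
 * cvmx_cmd_queue_result_t error code. A sketch of a careful caller
 * (queue id hypothetical):
 *
 *	int words = cvmx_cmd_queue_length(CVMX_CMD_QUEUE_PKO(0));
 *
 *	if (words < 0)
 *		cvmx_dprintf("queue length error: %d\n", words);
 *	else if (words == 0)
 *		;	 (queue is fully drained)
 */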

/*
 * Return the command buffer to be written to. The purpose of this
 * function is to give CVMX routines access to the low-level buffer
 * for initial hardware setup. User applications should not call this
 * function directly.
 *
 * @queue_id: Command queue to query
 *
 * Returns Command buffer or NULL on failure
 */
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
	if (qptr && qptr->base_ptr_div128)
		return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
	else
		return NULL;
}
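
/*
 * A sketch of the intended (internal) use: during hardware bring-up,
 * the physical address of this buffer is what gets programmed into
 * the unit's command-buffer pointer register. The register name below
 * is hypothetical:
 *
 *	void *buf = cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO(0));
 *
 *	if (buf)
 *		some_hw_buf_ptr_reg = cvmx_ptr_to_phys(buf);
 */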
304