/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * hci1394_q.c
 *    This code decouples some of the OpenHCI async descriptor logic/structures
 *    from the async processing. The goal was to combine as much of the
 *    duplicate code as possible for the different types of async transfers
 *    without going too overboard.
 *
 *    There are two parts to the Q, the descriptor buffer and the data buffer.
 *    For the most part, data to be transmitted and data which is received go
 *    in the data buffers. The information about where to get the data and
 *    where to put the data resides in the descriptor buffers. There are
 *    exceptions to this.
 */
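
/*
 * Illustrative sketch (an aid added here, not a code path): for an AT block
 * write, the descriptor buffer holds an OMI (the 1394 header) followed by
 * an OL whose data_addr points at the payload that was copied into the data
 * buffer:
 *
 *	descriptor buffer                    data buffer
 *	+--------------------+               +-----------------+
 *	| OMI: 1394 header   |          +--> | packet payload  |
 *	| OL:  data_addr ----+----------+    +-----------------+
 *	+--------------------+
 *
 * AR Qs work the other way around: IM descriptors point at free regions of
 * the data buffer which the HW fills in as packets arrive.
 */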


#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/modctl.h>
#include <sys/stat.h>
#include <sys/sunddi.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/note.h>

#include <sys/1394/adapters/hci1394.h>


static int hci1394_q_reserve(hci1394_q_buf_t *qbuf, uint_t size,
    uint32_t *io_addr);
static void hci1394_q_unreserve(hci1394_q_buf_t *qbuf);
static void hci1394_q_buf_setup(hci1394_q_buf_t *qbuf);
static void hci1394_q_reset(hci1394_q_handle_t q_handle);
static void hci1394_q_next_buf(hci1394_q_buf_t *qbuf);

static void hci1394_q_at_write_OLI(hci1394_q_handle_t q_handle,
    hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr,
    uint_t hdrsize);
static void hci1394_q_at_write_OMI(hci1394_q_handle_t q_handle,
    hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr,
    uint_t hdrsize);
static void hci1394_q_at_write_OL(hci1394_q_handle_t q_handle,
    hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, uint32_t io_addr,
    uint_t datasize);
static void hci1394_q_at_rep_put8(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
    uint8_t *data, uint_t datasize);
static void hci1394_q_at_copy_from_mblk(hci1394_q_buf_t *qbuf,
    hci1394_q_cmd_t *cmd, h1394_mblk_t *mblk);

static void hci1394_q_ar_write_IM(hci1394_q_handle_t q_handle,
    hci1394_q_buf_t *qbuf, uint32_t io_addr, uint_t datasize);

_NOTE(SCHEME_PROTECTS_DATA("unique", msgb))

/*
 * hci1394_q_init()
 *    Initialize a Q. A Q consists of a descriptor buffer and a data buffer
 *    and can be either an AT or AR Q. hci1394_q_init() returns a handle which
 *    should be used for the rest of the hci1394_q_* calls.
 */
int
hci1394_q_init(hci1394_drvinfo_t *drvinfo,
    hci1394_ohci_handle_t ohci_handle, hci1394_q_info_t *qinfo,
    hci1394_q_handle_t *q_handle)
{
	hci1394_q_buf_t *desc;
	hci1394_q_buf_t *data;
	hci1394_buf_parms_t parms;
	hci1394_q_t *q;
	int status;
	int index;


	ASSERT(drvinfo != NULL);
	ASSERT(qinfo != NULL);
	ASSERT(q_handle != NULL);

	/*
	 * Allocate the memory to track this Q. Initialize the internal Q
	 * structure.
	 */
	q = kmem_alloc(sizeof (hci1394_q_t), KM_SLEEP);
	q->q_drvinfo = drvinfo;
	q->q_info = *qinfo;
	q->q_ohci = ohci_handle;
	mutex_init(&q->q_mutex, NULL, MUTEX_DRIVER, drvinfo->di_iblock_cookie);
	desc = &q->q_desc;
	data = &q->q_data;

	/*
	 * Allocate the Descriptor buffer.
	 *
	 * XXX - Only want 1 cookie for now. Change this to OHCI_MAX_COOKIE
	 * after we have tested the multiple cookie code on x86.
	 */
	parms.bp_length = qinfo->qi_desc_size;
	parms.bp_max_cookies = 1;
	parms.bp_alignment = 16;
	status = hci1394_buf_alloc(drvinfo, &parms, &desc->qb_buf,
	    &desc->qb_buf_handle);
	if (status != DDI_SUCCESS) {
		mutex_destroy(&q->q_mutex);
		kmem_free(q, sizeof (hci1394_q_t));
		*q_handle = NULL;
		return (DDI_FAILURE);
	}

	/* Copy the buffer cookies into our local cookie array */
	desc->qb_cookie[0] = desc->qb_buf.bi_cookie;
	for (index = 1; index < desc->qb_buf.bi_cookie_count; index++) {
		ddi_dma_nextcookie(desc->qb_buf.bi_dma_handle,
		    &desc->qb_buf.bi_cookie);
		desc->qb_cookie[index] = desc->qb_buf.bi_cookie;
	}

	/*
	 * Allocate the Data buffer.
	 *
	 * XXX - Only want 1 cookie for now. Change this to OHCI_MAX_COOKIE
	 * after we have tested the multiple cookie code on x86.
	 */
	parms.bp_length = qinfo->qi_data_size;
	parms.bp_max_cookies = 1;
	parms.bp_alignment = 16;
	status = hci1394_buf_alloc(drvinfo, &parms, &data->qb_buf,
	    &data->qb_buf_handle);
	if (status != DDI_SUCCESS) {
		/* Free the allocated Descriptor buffer */
		hci1394_buf_free(&desc->qb_buf_handle);

		mutex_destroy(&q->q_mutex);
		kmem_free(q, sizeof (hci1394_q_t));
		*q_handle = NULL;
		return (DDI_FAILURE);
	}

	/*
	 * We must have at least 2 ARQ data buffers. If we only have one, we
	 * will artificially create 2. We must have 2 so that we always have a
	 * descriptor with free data space to write AR data to. When one is
	 * empty, it will take us a bit to get a new descriptor back into the
	 * chain.
	 */
	if ((qinfo->qi_mode == HCI1394_ARQ) &&
	    (data->qb_buf.bi_cookie_count == 1)) {
		data->qb_buf.bi_cookie_count = 2;
		data->qb_cookie[0] = data->qb_buf.bi_cookie;
		data->qb_cookie[0].dmac_size /= 2;
		data->qb_cookie[1] = data->qb_cookie[0];
		data->qb_cookie[1].dmac_laddress =
		    data->qb_cookie[0].dmac_laddress +
		    data->qb_cookie[0].dmac_size;
		data->qb_cookie[1].dmac_address =
		    data->qb_cookie[0].dmac_address +
		    data->qb_cookie[0].dmac_size;

	/* We have more than 1 cookie or we are an AT Q */
	} else {
		/* Copy the buffer cookies into our local cookie array */
		data->qb_cookie[0] = data->qb_buf.bi_cookie;
		for (index = 1; index < data->qb_buf.bi_cookie_count; index++) {
			ddi_dma_nextcookie(data->qb_buf.bi_dma_handle,
			    &data->qb_buf.bi_cookie);
			data->qb_cookie[index] = data->qb_buf.bi_cookie;
		}
	}

	/* The top and bottom of the Q are only set once */
	desc->qb_ptrs.qp_top = desc->qb_buf.bi_kaddr;
	desc->qb_ptrs.qp_bottom = desc->qb_buf.bi_kaddr +
	    desc->qb_buf.bi_real_length - 1;
	data->qb_ptrs.qp_top = data->qb_buf.bi_kaddr;
	data->qb_ptrs.qp_bottom = data->qb_buf.bi_kaddr +
	    data->qb_buf.bi_real_length - 1;

	/*
	 * Reset the Q pointers to their original settings. Setup IM
	 * descriptors if this is an AR Q.
	 */
	hci1394_q_reset(q);

	/* if this is an AT Q, create a queued list for the AT descriptors */
	if (qinfo->qi_mode == HCI1394_ATQ) {
		hci1394_tlist_init(drvinfo, NULL, &q->q_queued_list);
	}

	*q_handle = q;

	return (DDI_SUCCESS);
}
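
/*
 * A minimal usage sketch (illustrative only; the field values below are
 * assumptions, not taken from a real caller):
 *
 *	hci1394_q_info_t qinfo;
 *	hci1394_q_handle_t q;
 *
 *	qinfo.qi_mode = HCI1394_ATQ;
 *	qinfo.qi_desc_size = <descriptor buffer size>;
 *	qinfo.qi_data_size = <data buffer size>;
 *	qinfo.qi_start = ...; qinfo.qi_wake = ...; qinfo.qi_callback_arg = ...;
 *
 *	if (hci1394_q_init(drvinfo, ohci_handle, &qinfo, &q) == DDI_SUCCESS) {
 *		... submit commands with hci1394_q_at*() ...
 *		hci1394_q_fini(&q);
 *	}
 */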


/*
 * hci1394_q_fini()
 *    Cleanup after a successful hci1394_q_init(). Notice that a pointer to
 *    the handle is used for the parameter. fini() will set your handle to
 *    NULL before returning.
 */
void
hci1394_q_fini(hci1394_q_handle_t *q_handle)
{
	hci1394_q_t *q;

	ASSERT(q_handle != NULL);

	q = *q_handle;
	if (q->q_info.qi_mode == HCI1394_ATQ) {
		hci1394_tlist_fini(&q->q_queued_list);
	}
	mutex_destroy(&q->q_mutex);
	hci1394_buf_free(&q->q_desc.qb_buf_handle);
	hci1394_buf_free(&q->q_data.qb_buf_handle);
	kmem_free(q, sizeof (hci1394_q_t));
	*q_handle = NULL;
}


/*
 * hci1394_q_buf_setup()
 *    Initialization of buffer pointers which are present in both the
 *    descriptor buffer and data buffer (no reason to duplicate the code).
 */
static void
hci1394_q_buf_setup(hci1394_q_buf_t *qbuf)
{
	ASSERT(qbuf != NULL);

	/* start with the first cookie */
	qbuf->qb_ptrs.qp_current_buf = 0;
	qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_top;
	qbuf->qb_ptrs.qp_end = qbuf->qb_ptrs.qp_begin +
	    qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf].dmac_size - 1;
	qbuf->qb_ptrs.qp_current = qbuf->qb_ptrs.qp_begin;
	qbuf->qb_ptrs.qp_offset = 0;

	/*
	 * The free_buf and free pointer will change every time an ACK (of
	 * some type) is processed. Free is the last byte in the last cookie.
	 */
	qbuf->qb_ptrs.qp_free_buf = qbuf->qb_buf.bi_cookie_count - 1;
	qbuf->qb_ptrs.qp_free = qbuf->qb_ptrs.qp_bottom;

	/*
	 * Start with no space to write descriptors. We first need to call
	 * hci1394_q_reserve() before calling hci1394_q_at_write_O*().
	 */
	qbuf->qb_ptrs.qp_resv_size = 0;
}
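
/*
 * Worked example (values invented for illustration): with two 4KB cookies
 * and qp_top == K, hci1394_q_buf_setup() leaves
 *
 *	qp_current_buf = 0	qp_begin = qp_current = K
 *	qp_end = K + 0xfff	qp_offset = 0
 *	qp_free_buf = 1		qp_free = qp_bottom = K + 0x1fff
 *	qp_resv_size = 0
 *
 * i.e. the entire buffer is free, but a writer must still call
 * hci1394_q_reserve() before writing any descriptors.
 */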


/*
 * hci1394_q_reset()
 *    Resets the buffers to an initial state. This should be called during
 *    attach and resume.
 */
static void
hci1394_q_reset(hci1394_q_handle_t q_handle)
{
	hci1394_q_buf_t *desc;
	hci1394_q_buf_t *data;
	int index;

	ASSERT(q_handle != NULL);

	mutex_enter(&q_handle->q_mutex);
	desc = &q_handle->q_desc;
	data = &q_handle->q_data;

	hci1394_q_buf_setup(desc);
	hci1394_q_buf_setup(data);

	/* DMA starts off stopped, no previous descriptor to link from */
	q_handle->q_dma_running = B_FALSE;
	q_handle->q_block_cnt = 0;
	q_handle->q_previous = NULL;

	/* If this is an AR Q, setup IM's for the data buffers that we have */
	if (q_handle->q_info.qi_mode == HCI1394_ARQ) {
		/*
		 * This points to where to find the first IM descriptor. Since
		 * we just reset the pointers in hci1394_q_buf_setup(), the
		 * first IM we write below will be found at the top of the Q.
		 */
		q_handle->q_head = desc->qb_ptrs.qp_top;

		for (index = 0; index < data->qb_buf.bi_cookie_count; index++) {
			hci1394_q_ar_write_IM(q_handle, desc,
			    data->qb_cookie[index].dmac_address,
			    data->qb_cookie[index].dmac_size);
		}

		/*
		 * The space left in the current IM is the size of the buffer.
		 * The current buffer is the first buffer added to the AR Q.
		 */
		q_handle->q_space_left = data->qb_cookie[0].dmac_size;
	}

	mutex_exit(&q_handle->q_mutex);
}
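
/*
 * Sketch of the AR Q state after hci1394_q_reset() with two data cookies
 * (illustrative, not a code path): one IM descriptor has been written per
 * data cookie, each IM's branch pointing at the next, q_head at the first
 * IM, and q_space_left equal to the first cookie's size:
 *
 *	q_head --> IM #0 (covers data cookie 0) --> IM #1 (data cookie 1)
 */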


/*
 * hci1394_q_resume()
 *    This is called during a resume (after a successful suspend). Currently
 *    we only call reset. Since this is not a time critical function, we will
 *    leave this as a separate function to increase readability.
 */
void
hci1394_q_resume(hci1394_q_handle_t q_handle)
{
	ASSERT(q_handle != NULL);
	hci1394_q_reset(q_handle);
}


/*
 * hci1394_q_stop()
 *    This call informs us that a DMA engine has been stopped. It does not
 *    perform the actual stop. We need to know this so that when we add a
 *    new descriptor, we do a start instead of a wake.
 */
void
hci1394_q_stop(hci1394_q_handle_t q_handle)
{
	ASSERT(q_handle != NULL);
	mutex_enter(&q_handle->q_mutex);
	q_handle->q_dma_running = B_FALSE;
	mutex_exit(&q_handle->q_mutex);
}


/*
 * hci1394_q_reserve()
 *    Reserve space in the AT descriptor or data buffer. This ensures that we
 *    can get a contiguous buffer. Descriptors have to be in a contiguous
 *    buffer. Data does not have to be in a contiguous buffer but we do this
 *    to reduce complexity. For systems with small page sizes (e.g. x86), this
 *    could result in inefficient use of the data buffers when sending large
 *    data blocks (this only applies to non-physical block write ATREQs and
 *    block read ATRESPs). Since it looks like most protocols that use large
 *    data blocks (like SBP-2) use physical transfers to do this (due to their
 *    efficiency), this will probably not be a real world problem. If it turns
 *    out to be a problem, the options are to force a single cookie for the
 *    data buffer, allow multiple cookies and have a larger data space, or
 *    change the data code to use an OMI, OM, OL descriptor sequence (instead
 *    of OMI, OL).
 */
static int
hci1394_q_reserve(hci1394_q_buf_t *qbuf, uint_t size, uint32_t *io_addr)
{
	uint_t aligned_size;


	ASSERT(qbuf != NULL);

	/* Save a backup of the pointers in case we have to unreserve */
	qbuf->qb_backup_ptrs = qbuf->qb_ptrs;

	/*
	 * Make sure all allocs are quadlet aligned. The data doesn't have to
	 * be, so we will force it to be.
	 */
	aligned_size = HCI1394_ALIGN_QUAD(size);

	/*
	 * if the free pointer is in the current buffer and the free pointer
	 * is below the current pointer (i.e. has not wrapped around)
	 */
	if ((qbuf->qb_ptrs.qp_current_buf == qbuf->qb_ptrs.qp_free_buf) &&
	    (qbuf->qb_ptrs.qp_free >= qbuf->qb_ptrs.qp_current)) {
		/*
		 * The free pointer is in this buffer below the current
		 * pointer. Check to see if we have enough free space left.
		 */
		if ((qbuf->qb_ptrs.qp_current + aligned_size) <=
		    qbuf->qb_ptrs.qp_free) {
			/* Set up our reserved size, return the IO address */
			qbuf->qb_ptrs.qp_resv_size = aligned_size;
			*io_addr = (uint32_t)(qbuf->qb_cookie[
			    qbuf->qb_ptrs.qp_current_buf].dmac_address +
			    qbuf->qb_ptrs.qp_offset);

		/*
		 * The free pointer is in this buffer below the current
		 * pointer. We do not have enough free space for the alloc.
		 * Return failure.
		 */
		} else {
			qbuf->qb_ptrs.qp_resv_size = 0;
			return (DDI_FAILURE);
		}

	/*
	 * If there is not enough room to fit in the current buffer (not
	 * including wrap around), we will go to the next buffer and check
	 * there. If we only have one buffer (i.e. one cookie), we will end up
	 * staying at the current buffer and wrapping the address back to the
	 * top.
	 */
	} else if ((qbuf->qb_ptrs.qp_current + aligned_size) >
	    qbuf->qb_ptrs.qp_end) {
		/* Go to the next buffer (or the top of ours for one cookie) */
		hci1394_q_next_buf(qbuf);

		/* If the free pointer is in the new current buffer */
		if (qbuf->qb_ptrs.qp_current_buf == qbuf->qb_ptrs.qp_free_buf) {
			/*
			 * The free pointer is in this buffer. If we do not
			 * have enough free space for the alloc, return
			 * failure.
			 */
			if ((qbuf->qb_ptrs.qp_current + aligned_size) >
			    qbuf->qb_ptrs.qp_free) {
				qbuf->qb_ptrs.qp_resv_size = 0;
				return (DDI_FAILURE);
			/*
			 * The free pointer is in this buffer. We have enough
			 * free space left.
			 */
			} else {
				/*
				 * Set up our reserved size, return the IO
				 * address
				 */
				qbuf->qb_ptrs.qp_resv_size = aligned_size;
				*io_addr = (uint32_t)(qbuf->qb_cookie[
				    qbuf->qb_ptrs.qp_current_buf].dmac_address +
				    qbuf->qb_ptrs.qp_offset);
			}

		/*
		 * We switched buffers and the free pointer is still in another
		 * buffer. We have sufficient space in this buffer for the
		 * alloc after changing buffers.
		 */
		} else {
			/* Set up our reserved size, return the IO address */
			qbuf->qb_ptrs.qp_resv_size = aligned_size;
			*io_addr = (uint32_t)(qbuf->qb_cookie[
			    qbuf->qb_ptrs.qp_current_buf].dmac_address +
			    qbuf->qb_ptrs.qp_offset);
		}
	/*
	 * The free pointer is in another buffer. We have sufficient space in
	 * this buffer for the alloc.
	 */
	} else {
		/* Set up our reserved size, return the IO address */
		qbuf->qb_ptrs.qp_resv_size = aligned_size;
		*io_addr = (uint32_t)(qbuf->qb_cookie[
		    qbuf->qb_ptrs.qp_current_buf].dmac_address +
		    qbuf->qb_ptrs.qp_offset);
	}

	return (DDI_SUCCESS);
}
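
/*
 * Worked example of the cases above (all sizes invented): assume a single
 * 4KB cookie with qp_current sitting 16 bytes below qp_free.
 *   - Reserving 8 bytes succeeds (8 is already quadlet aligned and
 *     qp_current + 8 <= qp_free).
 *   - Reserving 5 bytes also succeeds; HCI1394_ALIGN_QUAD() rounds it up
 *     to 8.
 *   - Reserving 24 bytes fails with DDI_FAILURE since it would cross
 *     qp_free.
 *   - If qp_current were instead within 24 bytes of qp_end, the reserve
 *     would first wrap to the top via hci1394_q_next_buf() and retest
 *     against qp_free from there.
 */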

/*
 * hci1394_q_unreserve()
 *    Set the buffer pointers back to what they were before
 *    hci1394_q_reserve(). This will be called when we encounter errors
 *    during hci1394_q_at*().
 */
static void
hci1394_q_unreserve(hci1394_q_buf_t *qbuf)
{
	ASSERT(qbuf != NULL);

	/* Go back to the pointer settings before the reserve */
	qbuf->qb_ptrs = qbuf->qb_backup_ptrs;
}


/*
 * hci1394_q_next_buf()
 *    Set our current buffer to the next cookie. If we only have one cookie,
 *    we will go back to the top of our buffer.
 */
void
hci1394_q_next_buf(hci1394_q_buf_t *qbuf)
{
	ASSERT(qbuf != NULL);

	/*
	 * go to the next cookie; if we are >= the cookie count, go back to
	 * the first cookie.
	 */
	qbuf->qb_ptrs.qp_current_buf++;
	if (qbuf->qb_ptrs.qp_current_buf >= qbuf->qb_buf.bi_cookie_count) {
		qbuf->qb_ptrs.qp_current_buf = 0;
	}

	/* adjust the begin, end, current, and offset pointers */
	qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_end + 1;
	if (qbuf->qb_ptrs.qp_begin > qbuf->qb_ptrs.qp_bottom) {
		qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_top;
	}
	qbuf->qb_ptrs.qp_end = qbuf->qb_ptrs.qp_begin +
	    qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf].dmac_size - 1;
	qbuf->qb_ptrs.qp_current = qbuf->qb_ptrs.qp_begin;
	qbuf->qb_ptrs.qp_offset = 0;
}
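
/*
 * Example of the wrap behavior (illustrative): with a single cookie,
 * hci1394_q_next_buf() increments qp_current_buf to 1, sees it is >= the
 * cookie count, and snaps back to cookie 0; qp_end + 1 is past qp_bottom,
 * so qp_begin/qp_current land back at qp_top. With two cookies it simply
 * advances into the second cookie's address range.
 */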


/*
 * hci1394_q_at()
 *    Place an AT command that does NOT need the data buffer into the DMA
 *    chain. Some examples of this are quadlet read/write, PHY packets, ATREQ
 *    block read, and ATRESP block write. result is only valid on failure.
 */
int
hci1394_q_at(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
    hci1394_basic_pkt_t *hdr, uint_t hdrsize, int *result)
{
	int status;
	uint32_t ioaddr;


	ASSERT(q_handle != NULL);
	ASSERT(cmd != NULL);
	ASSERT(hdr != NULL);

	mutex_enter(&q_handle->q_mutex);

	/*
	 * Check the HAL state and generation when the AT Q is locked. This
	 * will make sure that we get all the commands when we flush the Q's
	 * during a reset or shutdown.
	 */
	if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
	    (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
	    cmd->qc_generation)) {
		*result = H1394_STATUS_INVALID_BUSGEN;
		mutex_exit(&q_handle->q_mutex);
		return (DDI_FAILURE);
	}

	/* save away the argument to pass up when this command completes */
	cmd->qc_node.tln_addr = cmd;

	/* we have not written any 16 byte blocks to the descriptor yet */
	q_handle->q_block_cnt = 0;

	/* Reserve space for an OLI in the descriptor buffer */
	status = hci1394_q_reserve(&q_handle->q_desc,
	    sizeof (hci1394_desc_imm_t), &ioaddr);
	if (status != DDI_SUCCESS) {
		*result = H1394_STATUS_NOMORE_SPACE;
		mutex_exit(&q_handle->q_mutex);
		return (DDI_FAILURE);
	}

	/* write the OLI to the descriptor buffer */
	hci1394_q_at_write_OLI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);

	/* Add the AT command to the queued list */
	hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);

	mutex_exit(&q_handle->q_mutex);

	return (DDI_SUCCESS);
}
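
/*
 * Sketch of a caller-level submission (illustrative; the real callers live
 * in the async layer): build the 1394 header in a hci1394_basic_pkt_t, set
 * cmd->qc_generation to the current bus generation, then
 *
 *	if (hci1394_q_at(q, cmd, &hdr, hdrsize, &result) != DDI_SUCCESS)
 *		... result holds the H1394_STATUS_* reason ...
 *
 * On success the command sits on q_queued_list until hci1394_q_at_next()
 * reaps it after the HW writes back its status and timestamp.
 */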


/*
 * XXX - NOTE: POSSIBLE FUTURE OPTIMIZATION
 *    ATREQ block reads and writes that go through software are not very
 *    efficient (one of the reasons to use physical space). A copy is forced
 *    on all block reads due to the design of OpenHCI. Writes do not have this
 *    same restriction. This design forces a copy for writes too (we always
 *    copy into a data buffer before sending). There are many reasons for this
 *    including complexity reduction. There is a data size threshold where a
 *    copy is more expensive than mapping the data buffer address (or, worst
 *    case, a big enough difference where it pays to do it). However, we move
 *    block data around in mblks which means that our data may be scattered
 *    over many buffers. This adds to the complexity of mapping and setting
 *    up the OpenHCI descriptors.
 *
 *    If someone really needs a speedup on block write ATREQs, my
 *    recommendation would be to add an additional command type at the target
 *    interface for a fast block write. The target driver would pass a mapped
 *    io addr to use. A function like "hci1394_q_at_with_ioaddr()" could be
 *    created which would be almost an exact copy of hci1394_q_at_with_data()
 *    without the hci1394_q_reserve() and hci1394_q_at_rep_put8() for the
 *    data buffer.
 */


/*
 * hci1394_q_at_with_data()
 *    Place an AT command that does need the data buffer into the DMA chain.
 *    The data is passed as a pointer to a kernel virtual address. An example
 *    of this is the lock operations. result is only valid on failure.
 */
int
hci1394_q_at_with_data(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
    hci1394_basic_pkt_t *hdr, uint_t hdrsize, uint8_t *data, uint_t datasize,
    int *result)
{
	uint32_t desc_ioaddr;
	uint32_t data_ioaddr;
	int status;


	ASSERT(q_handle != NULL);
	ASSERT(cmd != NULL);
	ASSERT(hdr != NULL);
	ASSERT(data != NULL);

	mutex_enter(&q_handle->q_mutex);

	/*
	 * Check the HAL state and generation when the AT Q is locked. This
	 * will make sure that we get all the commands when we flush the Q's
	 * during a reset or shutdown.
	 */
	if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
	    (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
	    cmd->qc_generation)) {
		*result = H1394_STATUS_INVALID_BUSGEN;
		mutex_exit(&q_handle->q_mutex);
		return (DDI_FAILURE);
	}

	/* save away the argument to pass up when this command completes */
	cmd->qc_node.tln_addr = cmd;

	/* we have not written any 16 byte blocks to the descriptor yet */
	q_handle->q_block_cnt = 0;

	/* Reserve space for an OMI and OL in the descriptor buffer */
	status = hci1394_q_reserve(&q_handle->q_desc,
	    (sizeof (hci1394_desc_imm_t) + sizeof (hci1394_desc_t)),
	    &desc_ioaddr);
	if (status != DDI_SUCCESS) {
		*result = H1394_STATUS_NOMORE_SPACE;
		mutex_exit(&q_handle->q_mutex);
		return (DDI_FAILURE);
	}

	/* allocate space for data in the data buffer */
	status = hci1394_q_reserve(&q_handle->q_data, datasize, &data_ioaddr);
	if (status != DDI_SUCCESS) {
		*result = H1394_STATUS_NOMORE_SPACE;
		hci1394_q_unreserve(&q_handle->q_desc);
		mutex_exit(&q_handle->q_mutex);
		return (DDI_FAILURE);
	}

	/* Copy data into the data buffer */
	hci1394_q_at_rep_put8(&q_handle->q_data, cmd, data, datasize);

	/* write the OMI to the descriptor buffer */
	hci1394_q_at_write_OMI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);

	/* write the OL to the descriptor buffer */
	hci1394_q_at_write_OL(q_handle, &q_handle->q_desc, cmd, data_ioaddr,
	    datasize);

	/* Add the AT command to the queued list */
	hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);

	mutex_exit(&q_handle->q_mutex);

	return (DDI_SUCCESS);
}


/*
 * hci1394_q_at_with_mblk()
 *    Place an AT command that does need the data buffer into the DMA chain.
 *    The data is passed in mblk_t(s). Examples of this are a block write
 *    ATREQ and a block read ATRESP. The services layer and the HAL use a
 *    private structure (h1394_mblk_t) to keep track of how much of the mblk
 *    to send since we may have to break the transfer up into smaller blocks
 *    (i.e. a 1MByte block write would go out in 2KByte chunks). result is
 *    only valid on failure.
 */
int
hci1394_q_at_with_mblk(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
    hci1394_basic_pkt_t *hdr, uint_t hdrsize, h1394_mblk_t *mblk, int *result)
{
	uint32_t desc_ioaddr;
	uint32_t data_ioaddr;
	int status;


	ASSERT(q_handle != NULL);
	ASSERT(cmd != NULL);
	ASSERT(hdr != NULL);
	ASSERT(mblk != NULL);

	mutex_enter(&q_handle->q_mutex);

	/*
	 * Check the HAL state and generation when the AT Q is locked. This
	 * will make sure that we get all the commands when we flush the Q's
	 * during a reset or shutdown.
	 */
	if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
	    (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
	    cmd->qc_generation)) {
		*result = H1394_STATUS_INVALID_BUSGEN;
		mutex_exit(&q_handle->q_mutex);
		return (DDI_FAILURE);
	}

	/* save away the argument to pass up when this command completes */
	cmd->qc_node.tln_addr = cmd;

	/* we have not written any 16 byte blocks to the descriptor yet */
	q_handle->q_block_cnt = 0;

	/* Reserve space for an OMI and OL in the descriptor buffer */
	status = hci1394_q_reserve(&q_handle->q_desc,
	    (sizeof (hci1394_desc_imm_t) + sizeof (hci1394_desc_t)),
	    &desc_ioaddr);
	if (status != DDI_SUCCESS) {
		*result = H1394_STATUS_NOMORE_SPACE;
		mutex_exit(&q_handle->q_mutex);
		return (DDI_FAILURE);
	}

	/* Reserve space for data in the data buffer */
	status = hci1394_q_reserve(&q_handle->q_data, mblk->length,
	    &data_ioaddr);
	if (status != DDI_SUCCESS) {
		*result = H1394_STATUS_NOMORE_SPACE;
		hci1394_q_unreserve(&q_handle->q_desc);
		mutex_exit(&q_handle->q_mutex);
		return (DDI_FAILURE);
	}

	/* Copy the mblk data into the data buffer */
	hci1394_q_at_copy_from_mblk(&q_handle->q_data, cmd, mblk);

	/* write the OMI to the descriptor buffer */
	hci1394_q_at_write_OMI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);

	/* write the OL to the descriptor buffer */
	hci1394_q_at_write_OL(q_handle, &q_handle->q_desc, cmd, data_ioaddr,
	    mblk->length);

	/* Add the AT command to the queued list */
	hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);

	mutex_exit(&q_handle->q_mutex);

	return (DDI_SUCCESS);
}


/*
 * hci1394_q_at_next()
 *    Return the next completed AT command in cmd. If flush_q is true, we
 *    will return the command regardless of whether it has finished or not.
 *    We will flush during bus reset processing, shutdown, and detach.
 */
void
hci1394_q_at_next(hci1394_q_handle_t q_handle, boolean_t flush_q,
    hci1394_q_cmd_t **cmd)
{
	hci1394_q_buf_t *desc;
	hci1394_q_buf_t *data;
	hci1394_tlist_node_t *node;
	uint32_t cmd_status;


	ASSERT(q_handle != NULL);
	ASSERT(cmd != NULL);

	mutex_enter(&q_handle->q_mutex);

	desc = &q_handle->q_desc;
	data = &q_handle->q_data;

	/* Sync descriptor buffer */
	(void) ddi_dma_sync(desc->qb_buf.bi_dma_handle, 0,
	    desc->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);

	/* Look at the top cmd on the queued list (without removing it) */
	hci1394_tlist_peek(q_handle->q_queued_list, &node);
	if (node == NULL) {
		/* There are no more commands left on the queued list */
		*cmd = NULL;
		mutex_exit(&q_handle->q_mutex);
		return;
	}

	/*
	 * There is a command on the list; read its status and the timestamp
	 * of when it was sent.
	 */
	*cmd = (hci1394_q_cmd_t *)node->tln_addr;
	cmd_status = ddi_get32(desc->qb_buf.bi_handle, (*cmd)->qc_status_addr);
	(*cmd)->qc_timestamp = cmd_status & DESC_ST_TIMESTAMP_MASK;
	cmd_status = HCI1394_DESC_EVT_GET(cmd_status);

	/*
	 * If we are flushing the Q (e.g. due to a bus reset), we will return
	 * the command regardless of its completion status. If we are not
	 * flushing the Q and we do not have status on the command (e.g.
	 * status == 0), we are done with this Q for now.
	 */
	if (flush_q == B_FALSE) {
		if (cmd_status == 0) {
			*cmd = NULL;
			mutex_exit(&q_handle->q_mutex);
			return;
		}
	}

	/*
	 * The command completed; remove it from the queued list. There is not
	 * a race condition to delete the node in the list here. This is the
	 * only place the node will be deleted so we do not need to check the
	 * return status.
	 */
	(void) hci1394_tlist_delete(q_handle->q_queued_list, node);

	/*
	 * Free the space used by the command in the descriptor and data
	 * buffers.
	 */
	desc->qb_ptrs.qp_free_buf = (*cmd)->qc_descriptor_buf;
	desc->qb_ptrs.qp_free = (*cmd)->qc_descriptor_end;
	if ((*cmd)->qc_data_used == B_TRUE) {
		data->qb_ptrs.qp_free_buf = (*cmd)->qc_data_buf;
		data->qb_ptrs.qp_free = (*cmd)->qc_data_end;
	}

	/* return command status */
	(*cmd)->qc_status = cmd_status;

	mutex_exit(&q_handle->q_mutex);
}


/*
 * hci1394_q_at_write_OMI()
 *    Write an OMI descriptor into the AT descriptor buffer passed in as
 *    qbuf. Buffer state information is stored in cmd. Use the hdr and
 *    hdrsize for the additional information attached to an immediate
 *    descriptor.
 */
void
hci1394_q_at_write_OMI(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
    hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr, uint_t hdrsize)
{
	hci1394_desc_imm_t *desc;
	uint32_t data;


	ASSERT(qbuf != NULL);
	ASSERT(cmd != NULL);
	ASSERT(hdr != NULL);
	ASSERT(MUTEX_HELD(&q_handle->q_mutex));

	/* The only valid "header" sizes for an OMI are 8 bytes or 16 bytes */
	ASSERT((hdrsize == 8) || (hdrsize == 16));

	/* Make sure there is enough room for the OMI */
	ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_imm_t));

	/* Store the offset of the top of this descriptor block */
	qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
	    qbuf->qb_ptrs.qp_begin);

	/* Setup OpenHCI OMI Header */
	desc = (hci1394_desc_imm_t *)qbuf->qb_ptrs.qp_current;
	data = DESC_AT_OMI | (hdrsize & DESC_HDR_REQCOUNT_MASK);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, 0);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, cmd->qc_timestamp);

	/*
	 * Copy in the 1394 header. The size is in bytes; convert it to a
	 * 32-bit word count.
	 */
	ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1,
	    hdrsize >> 2, DDI_DEV_AUTOINCR);

	/*
	 * We wrote 2 16 byte blocks in the descriptor buffer; update the
	 * count accordingly. Update the reserved size and current pointer.
	 */
	q_handle->q_block_cnt += 2;
	qbuf->qb_ptrs.qp_resv_size -= sizeof (hci1394_desc_imm_t);
	qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_imm_t);
}
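
/*
 * Illustrative OMI layout (a sketch of the writes above): with a 16-byte
 * header the immediate descriptor occupies 32 bytes, i.e. 2 16-byte blocks,
 * which is why q_block_cnt is bumped by 2:
 *
 *	hdr		DESC_AT_OMI | 16 (reqCount = header size)
 *	data_addr	0
 *	branch		0
 *	status		qc_timestamp
 *	q1..q4		the four 1394 header quadlets
 */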


/*
 * hci1394_q_at_write_OLI()
 *    Write an OLI descriptor into the AT descriptor buffer passed in as
 *    qbuf. Buffer state information is stored in cmd. Use the hdr and
 *    hdrsize for the additional information attached to an immediate
 *    descriptor.
 */
void
hci1394_q_at_write_OLI(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
    hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr, uint_t hdrsize)
{
	hci1394_desc_imm_t *desc;
	uint32_t data;
	uint32_t command_ptr;
	uint32_t tcode;


	ASSERT(qbuf != NULL);
	ASSERT(cmd != NULL);
	ASSERT(hdr != NULL);
	ASSERT(MUTEX_HELD(&q_handle->q_mutex));

	/* The only valid "header" sizes for an OLI are 8, 12, or 16 bytes */
	ASSERT((hdrsize == 8) || (hdrsize == 12) || (hdrsize == 16));

	/* make sure there is enough room for 1 OLI */
	ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_imm_t));

	/* Store the offset of the top of this descriptor block */
	qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
	    qbuf->qb_ptrs.qp_begin);

	/* Setup OpenHCI OLI Header */
	desc = (hci1394_desc_imm_t *)qbuf->qb_ptrs.qp_current;
	data = DESC_AT_OLI | (hdrsize & DESC_HDR_REQCOUNT_MASK);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, 0);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, cmd->qc_timestamp);

	/* Setup 1394 Header */
	tcode = (hdr->q1 & DESC_PKT_TCODE_MASK) >> DESC_PKT_TCODE_SHIFT;
	if ((tcode == IEEE1394_TCODE_WRITE_QUADLET) ||
	    (tcode == IEEE1394_TCODE_READ_QUADLET_RESP)) {
		/*
		 * if the tcode is a quadlet write, move the last quadlet as
		 * 8-bit data. All data is treated as 8-bit data (even quadlet
		 * reads and writes). Therefore, target drivers MUST take that
		 * into consideration when accessing device registers.
		 */
		ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1, 3,
		    DDI_DEV_AUTOINCR);
		ddi_rep_put8(qbuf->qb_buf.bi_handle, (uint8_t *)&hdr->q4,
		    (uint8_t *)&desc->q4, 4, DDI_DEV_AUTOINCR);
	} else {
		ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1,
		    hdrsize >> 2, DDI_DEV_AUTOINCR);
	}

	/*
	 * We wrote 2 16 byte blocks in the descriptor buffer; update the
	 * count accordingly.
	 */
	q_handle->q_block_cnt += 2;

	/*
	 * Sync the buffer in case the DMA engine is currently running. This
	 * must be done before writing the command pointer in the previous
	 * descriptor.
	 */
	(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
	    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);

	/* save away the status address for quick access in at_next() */
	cmd->qc_status_addr = &desc->status;

	/*
	 * Setup the command pointer. This tells the HW where to get the
	 * descriptor we just setup. This includes the IO address along with
	 * a 4-bit count of 16-byte blocks.
	 */
	command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
	    ].dmac_address + qbuf->qb_ptrs.qp_offset) | (q_handle->q_block_cnt &
	    DESC_Z_MASK));

	/*
	 * if we previously set up a descriptor, add this new descriptor into
	 * the previous descriptor's "next" pointer.
	 */
	if (q_handle->q_previous != NULL) {
		ddi_put32(qbuf->qb_buf.bi_handle, &q_handle->q_previous->branch,
		    command_ptr);
		/* Sync the buffer again; this gets the command pointer */
		(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
		    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
	}

	/*
	 * this is now the previous descriptor. Update the current pointer,
	 * clear the block count and reserved size since this is the end of
	 * this command.
	 */
	q_handle->q_previous = (hci1394_desc_t *)desc;
	qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_imm_t);
	q_handle->q_block_cnt = 0;
	qbuf->qb_ptrs.qp_resv_size = 0;

	/* save away cleanup info for when we are done with the command */
	cmd->qc_descriptor_buf = qbuf->qb_ptrs.qp_current_buf;
	cmd->qc_descriptor_end = qbuf->qb_ptrs.qp_current - 1;

	/* If the DMA is not running, start it */
	if (q_handle->q_dma_running == B_FALSE) {
		q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
		    command_ptr);
		q_handle->q_dma_running = B_TRUE;
	/* the DMA is running, wake it up */
	} else {
		q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
	}
}
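
/*
 * Worked example of the command pointer encoding (addresses invented): if
 * a descriptor block starts at IO address 0x12340 and consists of an OMI
 * plus an OL (3 16-byte blocks total, so Z = 3), then
 *
 *	command_ptr = 0x12340 | 3 = 0x12343
 *
 * The low 4 bits (DESC_Z_MASK) carry Z; this works because descriptor
 * blocks are 16-byte aligned. A lone OLI, as written above, has Z = 2.
 */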


/*
 * hci1394_q_at_write_OL()
 *    Write an OL descriptor into the AT descriptor buffer passed in as qbuf.
 *    Buffer state information is stored in cmd. The IO address of the data
 *    buffer is passed in io_addr. size is the size of the data to be
 *    transferred.
 */
void
hci1394_q_at_write_OL(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
    hci1394_q_cmd_t *cmd, uint32_t io_addr, uint_t size)
{
	hci1394_desc_t *desc;
	uint32_t data;
	uint32_t command_ptr;


	ASSERT(q_handle != NULL);
	ASSERT(qbuf != NULL);
	ASSERT(cmd != NULL);
	ASSERT(MUTEX_HELD(&q_handle->q_mutex));

	/* make sure there is enough room for the OL */
	ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_t));

	/* Setup OpenHCI OL Header */
	desc = (hci1394_desc_t *)qbuf->qb_ptrs.qp_current;
	data = DESC_AT_OL | (size & DESC_HDR_REQCOUNT_MASK);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, io_addr);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, 0);

	/*
	 * We wrote 1 16 byte block in the descriptor buffer; update the count
	 * accordingly.
	 */
	q_handle->q_block_cnt++;

	/*
	 * Sync the buffer in case the DMA engine is currently running. This
	 * must be done before writing the command pointer in the previous
	 * descriptor.
	 */
	(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
	    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);

	/* save away the status address for quick access in at_next() */
	cmd->qc_status_addr = &desc->status;

	/*
	 * Setup the command pointer. This tells the HW where to get the
	 * descriptor we just setup. This includes the IO address along with
	 * a 4-bit count of 16-byte blocks.
	 */
	command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
	    ].dmac_address + qbuf->qb_ptrs.qp_offset) | (q_handle->q_block_cnt &
	    DESC_Z_MASK));

	/*
	 * if we previously set up a descriptor, add this new descriptor into
	 * the previous descriptor's "next" pointer.
	 */
	if (q_handle->q_previous != NULL) {
		ddi_put32(qbuf->qb_buf.bi_handle, &q_handle->q_previous->branch,
		    command_ptr);
		/* Sync the buffer again; this gets the command pointer */
		(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
		    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
	}

	/*
	 * this is now the previous descriptor. Update the current pointer,
	 * clear the block count and reserved size since this is the end of
	 * this command.
	 */
	q_handle->q_previous = desc;
	qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_t);
	q_handle->q_block_cnt = 0;
	qbuf->qb_ptrs.qp_resv_size = 0;

	/* save away cleanup info for when we are done with the command */
	cmd->qc_descriptor_buf = qbuf->qb_ptrs.qp_current_buf;
	cmd->qc_descriptor_end = qbuf->qb_ptrs.qp_current - 1;

	/* If the DMA is not running, start it */
	if (q_handle->q_dma_running == B_FALSE) {
		q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
		    command_ptr);
		q_handle->q_dma_running = B_TRUE;
	/* the DMA is running, wake it up */
	} else {
		q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
	}
}


/*
 * hci1394_q_at_rep_put8()
 *    Copy a byte stream from a kernel virtual address (data) to an IO-mapped
 *    data buffer (qbuf). Copy datasize bytes. State information for the
 *    data buffer is kept in cmd.
 */
void
hci1394_q_at_rep_put8(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
    uint8_t *data, uint_t datasize)
{
	ASSERT(qbuf != NULL);
	ASSERT(cmd != NULL);
	ASSERT(data != NULL);

	/* Make sure there is enough room for the data */
	ASSERT(qbuf->qb_ptrs.qp_resv_size >= datasize);

	/* Copy the data into the data buffer */
	ddi_rep_put8(qbuf->qb_buf.bi_handle, data,
	    (uint8_t *)qbuf->qb_ptrs.qp_current, datasize, DDI_DEV_AUTOINCR);

	/* Update the current pointer, offset, and reserved size */
	qbuf->qb_ptrs.qp_current += datasize;
	qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
	    qbuf->qb_ptrs.qp_begin);
	qbuf->qb_ptrs.qp_resv_size -= datasize;

	/* save away cleanup info for when we are done with the command */
	cmd->qc_data_used = B_TRUE;
	cmd->qc_data_buf = qbuf->qb_ptrs.qp_current_buf;
	cmd->qc_data_end = qbuf->qb_ptrs.qp_current - 1;

	/* Sync the data buffer */
	(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
	    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
}


/*
 * hci1394_q_at_copy_from_mblk()
 *    Copy a byte stream from mblk(s) to an IO-mapped data buffer (qbuf).
 *    Copy mblk->length bytes. The services layer and the HAL use a private
 *    structure (h1394_mblk_t) to keep track of how much of the mblk to send
 *    since we may have to break the transfer up into smaller blocks (i.e. a
 *    1MByte block write would go out in 2KByte chunks). State information
 *    for the data buffer is kept in cmd.
 */
static void
hci1394_q_at_copy_from_mblk(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
    h1394_mblk_t *mblk)
{
	uint_t bytes_left;
	uint_t length;


	ASSERT(qbuf != NULL);
	ASSERT(cmd != NULL);
	ASSERT(mblk != NULL);

	/* We return these variables to the Services Layer when we are done */
	mblk->next_offset = mblk->curr_offset;
	mblk->next_mblk = mblk->curr_mblk;
	bytes_left = mblk->length;

	/* do while there are bytes left to copy */
	do {
		/*
		 * If the entire data portion of the current block transfer is
		 * contained within a single mblk.
		 */
		if ((mblk->next_offset + bytes_left) <=
		    (mblk->next_mblk->b_wptr)) {
			/* Copy the data into the data Q */
			hci1394_q_at_rep_put8(qbuf, cmd,
			    (uint8_t *)mblk->next_offset, bytes_left);

			/* increment the mblk offset */
			mblk->next_offset += bytes_left;

			/* we have no more bytes to put into the buffer */
			bytes_left = 0;

			/*
			 * If our offset is at the end of data in this mblk, go
			 * to the next mblk.
			 */
			if (mblk->next_offset >= mblk->next_mblk->b_wptr) {
				mblk->next_mblk = mblk->next_mblk->b_cont;
				if (mblk->next_mblk != NULL) {
					mblk->next_offset =
					    mblk->next_mblk->b_rptr;
				}
			}

		/*
		 * The data portion of the current block transfer is spread
		 * across two or more mblk's
		 */
		} else {
			/*
			 * Figure out how much data is in this mblk.
			 */
			length = mblk->next_mblk->b_wptr - mblk->next_offset;

			/* Copy the data into the atreq data Q */
			hci1394_q_at_rep_put8(qbuf, cmd,
			    (uint8_t *)mblk->next_offset, length);

			/* update the bytes left count, go to the next mblk */
			bytes_left = bytes_left - length;
			mblk->next_mblk = mblk->next_mblk->b_cont;
			ASSERT(mblk->next_mblk != NULL);
			mblk->next_offset = mblk->next_mblk->b_rptr;
		}
	} while (bytes_left > 0);
}
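
/*
 * Example of the mblk walk (sizes invented): for mblk->length == 4096 split
 * across two 2KB mblks, the loop first takes the "spread across two or more
 * mblk's" branch (copying the 2KB left in the first mblk), then the
 * single-mblk branch for the remaining 2KB, leaving next_mblk/next_offset
 * pointing just past the copied data for the Services Layer.
 */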


/*
 * hci1394_q_ar_next()
 *    Return the address of the next received AR packet. If there are no more
 *    AR packets in the buffer, q_addr will be set to NULL.
 */
void
hci1394_q_ar_next(hci1394_q_handle_t q_handle, uint32_t **q_addr)
{
	hci1394_desc_t *desc;
	hci1394_q_buf_t *descb;
	hci1394_q_buf_t *datab;
	uint32_t residual_count;


	ASSERT(q_handle != NULL);
	ASSERT(q_addr != NULL);

	descb = &q_handle->q_desc;
	datab = &q_handle->q_data;

	/* Sync the Descriptor buffer */
	(void) ddi_dma_sync(descb->qb_buf.bi_dma_handle, 0,
	    descb->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * Check the residual count in the current IM vs q_space_left to see
	 * if we have received any more responses.
	 */
	desc = (hci1394_desc_t *)q_handle->q_head;
	residual_count = ddi_get32(descb->qb_buf.bi_handle, &desc->status);
	residual_count &= DESC_ST_RESCOUNT_MASK;
	if (residual_count >= q_handle->q_space_left) {
		/* No new packets received */
		*q_addr = NULL;
		return;
	}

	/* Sync the Data Q */
	(void) ddi_dma_sync(datab->qb_buf.bi_dma_handle, 0,
	    datab->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * We have a new packet; return the address of the start of the
	 * packet.
	 */
	*q_addr = (uint32_t *)datab->qb_ptrs.qp_current;
}


/*
 * hci1394_q_ar_free()
 *    Free the space used by the AR packet at the top of the data buffer. AR
 *    packets are processed in the order that they are received. This will
 *    free the oldest received packet which has not yet been freed. size is
 *    how much space the packet takes up.
 */
void
hci1394_q_ar_free(hci1394_q_handle_t q_handle, uint_t size)
{
	hci1394_q_buf_t *descb;
	hci1394_q_buf_t *datab;


	ASSERT(q_handle != NULL);

	descb = &q_handle->q_desc;
	datab = &q_handle->q_data;

	/*
	 * The packet spans multiple buffers. Theoretically a packet could be
	 * broken across more than two buffers for an ARRESP, but since the
	 * buffers should be in at least 4K increments this will not happen,
	 * as the max packet size is 2KBytes.
	 */
	if ((datab->qb_ptrs.qp_current + size) > datab->qb_ptrs.qp_end) {
		/* Add an IM descriptor for the used buffer back into the Q */
		hci1394_q_ar_write_IM(q_handle, descb,
		    datab->qb_cookie[datab->qb_ptrs.qp_current_buf
		    ].dmac_address,
		    datab->qb_cookie[datab->qb_ptrs.qp_current_buf].dmac_size);

		/* Go to the next buffer */
		hci1394_q_next_buf(datab);

		/* Update the next buffer's pointers for the partial packet */
		size -= q_handle->q_space_left;
		datab->qb_ptrs.qp_current += size;
		q_handle->q_space_left =
		    datab->qb_cookie[datab->qb_ptrs.qp_current_buf].dmac_size -
		    size;

		/* Change the head pointer to the next IM descriptor */
		q_handle->q_head += sizeof (hci1394_desc_t);
		if ((q_handle->q_head + sizeof (hci1394_desc_t)) >
		    (descb->qb_ptrs.qp_bottom + 1)) {
			q_handle->q_head = descb->qb_ptrs.qp_top;
		}

	/* The packet is only in one buffer */
	} else {
		q_handle->q_space_left -= size;
		datab->qb_ptrs.qp_current += size;
	}
}
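
/*
 * Worked example (sizes invented): say 100 bytes of space remain in data
 * cookie 0 (q_space_left == 100) and a 300-byte packet straddles into
 * cookie 1. hci1394_q_ar_free(300) writes a fresh IM for cookie 0, advances
 * to cookie 1, leaves qp_current 200 bytes into cookie 1 with
 * q_space_left = cookie 1's size - 200, and moves q_head to the next IM.
 */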


/*
 * hci1394_q_ar_get32()
 *    Read a quadlet of data regardless of whether it is in the current
 *    buffer or has wrapped to the top buffer. If the address passed to this
 *    routine is past the bottom of the data buffer, this routine will
 *    automatically wrap back to the top of the Q and look at the correct
 *    offset from the top. The quadlet read is returned to the caller.
 */
uint32_t
hci1394_q_ar_get32(hci1394_q_handle_t q_handle, uint32_t *addr)
{
	hci1394_q_buf_t *data;
	uintptr_t new_addr;
	uint32_t data32;


	ASSERT(q_handle != NULL);
	ASSERT(addr != NULL);

	data = &q_handle->q_data;

	/*
	 * if the data has wrapped to the top of the buffer, adjust the
	 * address.
	 */
	if ((uintptr_t)addr > (uintptr_t)data->qb_ptrs.qp_bottom) {
		new_addr = (uintptr_t)data->qb_ptrs.qp_top + ((uintptr_t)addr -
		    ((uintptr_t)data->qb_ptrs.qp_bottom + (uintptr_t)1));
		data32 = ddi_get32(data->qb_buf.bi_handle,
		    (uint32_t *)new_addr);

	/* the data is before the end of the buffer */
	} else {
		data32 = ddi_get32(data->qb_buf.bi_handle, addr);
	}

	return (data32);
}
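
/*
 * Example of the wrap arithmetic (addresses invented): with qp_top == T and
 * qp_bottom == T + 0x1fff, a read at addr == T + 0x2004 is adjusted to
 * T + (0x2004 - 0x2000) == T + 4, i.e. 4 bytes past the top of the buffer.
 */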


/*
 * hci1394_q_ar_rep_get8()
 *    Read a byte stream of data regardless of whether it is contiguous or
 *    has partially or fully wrapped to the top buffer. If the address passed
 *    to this routine is past the bottom of the data buffer, or address +
 *    size is past the bottom of the data buffer, this routine will
 *    automatically wrap back to the top of the Q and look at the correct
 *    offset from the top. Copy the data into the kernel virtual address
 *    provided.
 */
void
hci1394_q_ar_rep_get8(hci1394_q_handle_t q_handle, uint8_t *dest,
    uint8_t *q_addr, uint_t size)
{
	hci1394_q_buf_t *data;
	uintptr_t new_addr;
	uint_t new_size;
	uintptr_t new_dest;


	ASSERT(q_handle != NULL);
	ASSERT(dest != NULL);
	ASSERT(q_addr != NULL);

	data = &q_handle->q_data;

	/*
	 * There are three cases:
	 *   1) All of the data has wrapped.
	 *   2) Some of the data has not wrapped and some has wrapped.
	 *   3) None of the data has wrapped.
	 */

	/* All of the data has wrapped, just adjust the starting address */
	if ((uintptr_t)q_addr > (uintptr_t)data->qb_ptrs.qp_bottom) {
		new_addr = (uintptr_t)data->qb_ptrs.qp_top +
		    ((uintptr_t)q_addr - ((uintptr_t)data->qb_ptrs.qp_bottom +
		    (uintptr_t)1));
		ddi_rep_get8(data->qb_buf.bi_handle, dest, (uint8_t *)new_addr,
		    size, DDI_DEV_AUTOINCR);

	/*
	 * Some of the data has wrapped. Copy the data that hasn't wrapped,
	 * adjust the address, then copy the rest.
	 */
	} else if (((uintptr_t)q_addr + (uintptr_t)size) >
	    ((uintptr_t)data->qb_ptrs.qp_bottom + (uintptr_t)1)) {
		/* Copy the first half */
		new_size = (uint_t)(((uintptr_t)data->qb_ptrs.qp_bottom +
		    (uintptr_t)1) - (uintptr_t)q_addr);
		ddi_rep_get8(data->qb_buf.bi_handle, dest, q_addr, new_size,
		    DDI_DEV_AUTOINCR);

		/* copy the second half */
		new_dest = (uintptr_t)dest + (uintptr_t)new_size;
		new_size = size - new_size;
		new_addr = (uintptr_t)data->qb_ptrs.qp_top;
		ddi_rep_get8(data->qb_buf.bi_handle, (uint8_t *)new_dest,
		    (uint8_t *)new_addr, new_size, DDI_DEV_AUTOINCR);

	/* None of the data has wrapped */
	} else {
		ddi_rep_get8(data->qb_buf.bi_handle, dest, q_addr, size,
		    DDI_DEV_AUTOINCR);
	}
}


/*
 * hci1394_q_ar_copy_to_mblk()
 *    Read a byte stream of data regardless of whether it is contiguous or
 *    has partially or fully wrapped to the top buffer. If the address passed
 *    to this routine is past the bottom of the data buffer, or address +
 *    size is past the bottom of the data buffer, this routine will
 *    automatically wrap back to the top of the Q and look at the correct
 *    offset from the top. Copy the data into the mblk provided. The services
 *    layer and the HAL use a private structure (h1394_mblk_t) to keep track
 *    of how much of the mblk to receive into, since we may have to break the
 *    transfer up into smaller blocks (i.e. a 1MByte block read would go out
 *    in 2KByte requests).
 */
void
hci1394_q_ar_copy_to_mblk(hci1394_q_handle_t q_handle, uint8_t *addr,
    h1394_mblk_t *mblk)
{
	uint8_t *new_addr;
	uint_t bytes_left;
	uint_t length;


	ASSERT(q_handle != NULL);
	ASSERT(addr != NULL);
	ASSERT(mblk != NULL);

	/* We return these variables to the Services Layer when we are done */
	mblk->next_offset = mblk->curr_offset;
	mblk->next_mblk = mblk->curr_mblk;
	bytes_left = mblk->length;

	/* the address we copy from will change as we change mblks */
	new_addr = addr;

	/* do while there are bytes left to copy */
	do {
		/*
		 * If the entire data portion of the current block transfer is
		 * contained within a single mblk.
		 */
		if ((mblk->next_offset + bytes_left) <=
		    (mblk->next_mblk->b_datap->db_lim)) {
			/* Copy the data into the mblk */
			hci1394_q_ar_rep_get8(q_handle,
			    (uint8_t *)mblk->next_offset, new_addr, bytes_left);

			/* increment the offset */
			mblk->next_offset += bytes_left;
			mblk->next_mblk->b_wptr = mblk->next_offset;

			/* we have no more bytes to put into the buffer */
			bytes_left = 0;

			/*
			 * If our offset is at the end of data in this mblk, go
			 * to the next mblk.
			 */
			if (mblk->next_offset >=
			    mblk->next_mblk->b_datap->db_lim) {
				mblk->next_mblk = mblk->next_mblk->b_cont;
				if (mblk->next_mblk != NULL) {
					mblk->next_offset =
					    mblk->next_mblk->b_wptr;
				}
			}

		/*
		 * The data portion of the current block transfer is spread
		 * across two or more mblk's
		 */
		} else {
			/* Figure out how much data is in this mblk */
			length = mblk->next_mblk->b_datap->db_lim -
			    mblk->next_offset;

			/* Copy the data into the mblk */
			hci1394_q_ar_rep_get8(q_handle,
			    (uint8_t *)mblk->next_offset, new_addr, length);
			mblk->next_mblk->b_wptr =
			    mblk->next_mblk->b_datap->db_lim;

			/*
			 * update the bytes left and the address to copy from,
			 * then go to the next mblk.
			 */
			bytes_left = bytes_left - length;
			new_addr = (uint8_t *)((uintptr_t)new_addr +
			    (uintptr_t)length);
			mblk->next_mblk = mblk->next_mblk->b_cont;
			ASSERT(mblk->next_mblk != NULL);
			mblk->next_offset = mblk->next_mblk->b_wptr;
		}
	} while (bytes_left > 0);
}


/*
 * hci1394_q_ar_write_IM()
 *    Write an IM descriptor into the AR descriptor buffer passed in as qbuf.
 *    The IO address of the data buffer is passed in io_addr. datasize is the
 *    size of the data buffer to receive into.
 */
void
hci1394_q_ar_write_IM(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
    uint32_t io_addr, uint_t datasize)
{
	hci1394_desc_t *desc;
	uint32_t data;
	uint32_t command_ptr;


	ASSERT(q_handle != NULL);
	ASSERT(qbuf != NULL);

	/* Make sure there is enough room for the IM */
	if ((qbuf->qb_ptrs.qp_current + sizeof (hci1394_desc_t)) >
	    (qbuf->qb_ptrs.qp_bottom + 1)) {
		hci1394_q_next_buf(qbuf);
	} else {
		/* Store the offset of the top of this descriptor block */
		qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
		    qbuf->qb_ptrs.qp_begin);
	}

	/* Setup OpenHCI IM Header */
	desc = (hci1394_desc_t *)qbuf->qb_ptrs.qp_current;
	data = DESC_AR_IM | (datasize & DESC_HDR_REQCOUNT_MASK);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, io_addr);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
	ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, datasize &
	    DESC_ST_RESCOUNT_MASK);

	/*
	 * Sync the buffer in case the DMA engine is currently running. This
	 * must be done before writing the command pointer in the previous
	 * descriptor.
	 */
	(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
	    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);

	/*
	 * Setup the command pointer. This tells the HW where to get the
	 * descriptor we just setup. This includes the IO address along with
	 * a 4-bit count of 16-byte blocks. We only wrote 1 16 byte block.
	 */
	command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
	    ].dmac_address + qbuf->qb_ptrs.qp_offset) | 1);

	/*
	 * if we previously set up a descriptor, add this new descriptor into
	 * the previous descriptor's "next" pointer.
	 */
	if (q_handle->q_previous != NULL) {
		ddi_put32(qbuf->qb_buf.bi_handle,
		    &q_handle->q_previous->branch, command_ptr);
		/* Sync the buffer again; this gets the command pointer */
		(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
		    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
	}

	/* this is the new previous descriptor. Update the current pointer */
	q_handle->q_previous = desc;
	qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_t);

	/* If the DMA is not running, start it */
	if (q_handle->q_dma_running == B_FALSE) {
		q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
		    command_ptr);
		q_handle->q_dma_running = B_TRUE;
	/* the DMA is running, wake it up */
	} else {
		q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
	}
}