/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1999-2000 by Sun Microsystems, Inc.
 * All rights reserved.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * hci1394_async.c
 *    These routines manipulate the 1394 asynchronous DMA engines.  This
 *    includes incoming and outgoing reads, writes, and locks and their
 *    associated responses.
 */

#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/modctl.h>
#include <sys/stat.h>
#include <sys/sunddi.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/types.h>
#include <sys/note.h>

#include <sys/1394/h1394.h>
#include <sys/1394/adapters/hci1394.h>


/*
 * ASYNC_ARRESP_ACK_ERROR is OR'd into the error status when we get an ACK
 * error on an ARRESP.  Since the 1394 response code overlaps with the OpenHCI
 * ACK/EVT errors, we use this to distinguish between the errors in
 * process_arresp().
 */
#define	ASYNC_ARRESP_ACK_ERROR		0x8000

/* Macros to help extract the 48-bit 1394 address into a uint64_t */
#define	HCI1394_TO_ADDR_HI(data)	(((uint64_t)((data) & 0xFFFF)) << 32)
#define	HCI1394_TO_ADDR_LO(data)	((uint64_t)((data) & 0xFFFFFFFF))

/*
 * Macros to convert a byte stream into a big endian quadlet or octlet or back
 * the other way.  1394 arithmetic lock operations are done on big endian
 * quadlets or octlets.  Compare swaps and bit masks are done on byte streams.
 * All data is treated as byte streams over the bus.  These macros will convert
 * the data to a big endian "integer" on x86 platforms if the operation is an
 * arithmetic lock operation.  They do nothing if we are not on x86 or if the
 * operation is not an arithmetic lock operation.
 */
#ifdef _LITTLE_ENDIAN
#define	HCI1394_ARITH_LOCK_SWAP32(tcode, data) \
	(((tcode) == CMD1394_LOCK_FETCH_ADD) || \
	((tcode) == CMD1394_LOCK_BOUNDED_ADD) || \
	((tcode) == CMD1394_LOCK_WRAP_ADD)) ? \
	(ddi_swap32(data)) : (data)
#define	HCI1394_ARITH_LOCK_SWAP64(tcode, data) \
	(((tcode) == CMD1394_LOCK_FETCH_ADD) || \
	((tcode) == CMD1394_LOCK_BOUNDED_ADD) || \
	((tcode) == CMD1394_LOCK_WRAP_ADD)) ? \
	(ddi_swap64(data)) : (data)
#else
#define	HCI1394_ARITH_LOCK_SWAP32(tcode, data)	(data)
#define	HCI1394_ARITH_LOCK_SWAP64(tcode, data)	(data)
#endif
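
/*
 * Illustrative sketch only, not part of the original driver.  It is guarded
 * by a hypothetical HCI1394_ASYNC_EXAMPLES symbol that is never defined, and
 * the hci1394_example_* names are made up for illustration.  It shows how the
 * macros above would typically be used: combining the two header quadlets
 * that carry the 16-bit destination_offset_hi and 32-bit destination_offset_lo
 * into one 48-bit 1394 address, and conditionally byte swapping a lock
 * payload for arithmetic lock types.
 */
#ifdef HCI1394_ASYNC_EXAMPLES
static uint64_t
hci1394_example_addr(uint32_t quadlet_with_offset_hi,
    uint32_t quadlet_with_offset_lo)
{
	/* upper 16 bits of the address, then the lower 32 bits */
	return (HCI1394_TO_ADDR_HI(quadlet_with_offset_hi) |
	    HCI1394_TO_ADDR_LO(quadlet_with_offset_lo));
}

static uint32_t
hci1394_example_lock32_swap(uint_t lock_type, uint32_t payload)
{
	/* swapped on x86 only for fetch/bounded/wrap add lock types */
	return (HCI1394_ARITH_LOCK_SWAP32(lock_type, payload));
}
#endif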


static int hci1394_async_arresp_read(hci1394_async_handle_t async_handle,
    hci1394_basic_pkt_t *pkt, uint_t *tcode, hci1394_async_cmd_t **hcicmd,
    uint_t *size);
static int hci1394_async_arresp_size_get(uint_t tcode, hci1394_q_handle_t q,
    uint32_t *addr, uint_t *size);

static int hci1394_async_arreq_read(hci1394_async_handle_t async_handle,
    hci1394_basic_pkt_t *pkt, uint_t *tcode, hci1394_async_cmd_t **hcicmd,
    uint_t *size);
static int hci1394_async_arreq_read_qrd(hci1394_async_handle_t async_handle,
    hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size);
static int hci1394_async_arreq_read_qwr(hci1394_async_handle_t async_handle,
    hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size);
static int hci1394_async_arreq_read_brd(hci1394_async_handle_t async_handle,
    hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size);
static int hci1394_async_arreq_read_bwr(hci1394_async_handle_t async_handle,
    hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size);
static int hci1394_async_arreq_read_lck(hci1394_async_handle_t async_handle,
    hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size);
static int hci1394_async_arreq_read_phy(hci1394_async_handle_t async_handle,
    hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size,
    boolean_t *bus_reset_token);

static void hci1394_async_hcicmd_init(hci1394_async_handle_t async_handle,
    cmd1394_cmd_t *cmd, h1394_cmd_priv_t *cmd_priv,
    hci1394_async_cmd_t **hcicmd);

static void hci1394_async_atreq_start(void *async, uint32_t command_ptr);
static void hci1394_async_arresp_start(void *async, uint32_t command_ptr);
static void hci1394_async_arreq_start(void *async, uint32_t command_ptr);
static void hci1394_async_atresp_start(void *async, uint32_t command_ptr);

static void hci1394_async_atreq_wake(void *async);
static void hci1394_async_arresp_wake(void *async);
static void hci1394_async_arreq_wake(void *async);
static void hci1394_async_atresp_wake(void *async);

static void hci1394_async_atreq_flush(hci1394_async_handle_t async_handle);
static void hci1394_async_arresp_flush(hci1394_async_handle_t async_handle);
static void hci1394_async_arreq_flush(hci1394_async_handle_t async_handle);
static void hci1394_async_atresp_flush(hci1394_async_handle_t async_handle);
static void hci1394_async_pending_list_flush(hci1394_async_handle_t
    async_handle);

static void hci1394_async_pending_timeout(hci1394_tlist_node_t *node,
    void *arg);
static uint_t hci1394_async_timeout_calc(hci1394_async_handle_t async_handle,
    uint_t current_time);

_NOTE(SCHEME_PROTECTS_DATA("unique", msgb))

/*
 * hci1394_async_init()
 *    Initialize the async DMA engines and state.  We init the tlabels, the
 *    ATREQ pending Q, and the ATREQ, ARRESP, ARREQ, and ATRESP Q's.  init()
 *    returns a handle to be used by the rest of the async functions.
 */
int
hci1394_async_init(hci1394_drvinfo_t *drvinfo,
    hci1394_ohci_handle_t ohci_handle, hci1394_csr_handle_t csr_handle,
    hci1394_async_handle_t *async_handle)
{
	hci1394_tlist_timer_t timer_info;
	hci1394_q_info_t qinfo;
	hci1394_async_t *async;
	int status;


	ASSERT(drvinfo != NULL);
	ASSERT(ohci_handle != NULL);
	ASSERT(csr_handle != NULL);
	ASSERT(async_handle != NULL);
	TNF_PROBE_0_DEBUG(hci1394_async_init_enter, HCI1394_TNF_HAL_STACK, "");

	/* alloc the space to keep track of the list */
	async = kmem_alloc(sizeof (hci1394_async_t), KM_SLEEP);

	/* copy in parms to our local state */
	async->as_drvinfo = drvinfo;
	async->as_ohci = ohci_handle;
	async->as_csr = csr_handle;
	async->as_flushing_arreq = B_FALSE;
	async->as_phy_reset = 0xFFFFFFFF;
	mutex_init(&async->as_atomic_lookup, NULL, MUTEX_DRIVER,
	    drvinfo->di_iblock_cookie);

	/*
	 * Initialize the tlabels.  Reclaim a bad tlabel after the split
	 * timeout has gone by.  This time is in reference to the point the
	 * transaction has been marked as bad.  Therefore the tlabel will be
	 * reclaimed at twice the split_timeout (i.e. if the split timeout was
	 * set to 100mS and the transaction has timed out, 100mS has already
	 * gone by.  We need to wait 100mS more before we can reuse the tlabel.
	 * Therefore, the reclaim time passed here is split_timeout and not
	 * split_timeout * 2).  The split timeout is stored as a number of bus
	 * cycles.  We need to convert this to nS since the reclaim time is
	 * passed in nS.
	 */
	hci1394_tlabel_init(drvinfo, OHCI_BUS_CYCLE_TO_nS(
	    hci1394_csr_split_timeout_get(csr_handle)), &async->as_tlabel);

	/*
	 * Initialize the ATREQ pending list.  A pended ATREQ will be timed out
	 * after "split_timeout" has gone by.  The split timeout is in bus
	 * cycles so we need to convert that to nS for the tlist timer info.
	 * We will set the timer resolution to 1/2 of the timeout so that we
	 * will have a worst case timeout of split timeout + (1/2 * split
	 * timeout).  See hci1394_tlist.h for more information about this.
	 */
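	/*
	 * Worked example (illustrative numbers, not taken from this file):
	 * assuming the usual 125uS bus cycle and a 100mS split timeout, the
	 * CSR split timeout register holds 800 bus cycles, so
	 * OHCI_BUS_CYCLE_TO_nS() yields 100,000,000nS.  The timer resolution
	 * below would then be 50,000,000nS, giving a worst case pending
	 * timeout of roughly 150mS (timeout + 1/2 timeout).
	 */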
	timer_info.tlt_timeout =
	    OHCI_BUS_CYCLE_TO_nS(hci1394_csr_split_timeout_get(csr_handle));
	timer_info.tlt_timer_resolution = timer_info.tlt_timeout / 2;
	timer_info.tlt_callback = hci1394_async_pending_timeout;
	timer_info.tlt_callback_arg = async;
	hci1394_tlist_init(drvinfo, &timer_info, &async->as_pending_list);

	/* Initialize ATREQ Q */
	qinfo.qi_desc_size = ASYNC_ATREQ_DESC_SIZE;
	qinfo.qi_data_size = ASYNC_ATREQ_DATA_SIZE;
	qinfo.qi_mode = HCI1394_ATQ;
	qinfo.qi_start = hci1394_async_atreq_start;
	qinfo.qi_wake = hci1394_async_atreq_wake;
	qinfo.qi_callback_arg = async;
	status = hci1394_q_init(drvinfo, async->as_ohci, &qinfo,
	    &async->as_atreq_q);
	if (status != DDI_SUCCESS) {
		mutex_destroy(&async->as_atomic_lookup);
		hci1394_tlist_fini(&async->as_pending_list);
		hci1394_tlabel_fini(&async->as_tlabel);
		kmem_free(async, sizeof (hci1394_async_t));
		*async_handle = NULL;
		TNF_PROBE_0(hci1394_async_q_init_fail, HCI1394_TNF_HAL_ERROR,
		    "");
		TNF_PROBE_0_DEBUG(hci1394_async_init_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_FAILURE);
	}

	/* Initialize ARRESP Q */
	qinfo.qi_desc_size = ASYNC_ARRESP_DESC_SIZE;
	qinfo.qi_data_size = ASYNC_ARRESP_DATA_SIZE;
	qinfo.qi_mode = HCI1394_ARQ;
	qinfo.qi_start = hci1394_async_arresp_start;
	qinfo.qi_wake = hci1394_async_arresp_wake;
	qinfo.qi_callback_arg = async;
	status = hci1394_q_init(drvinfo, async->as_ohci, &qinfo,
	    &async->as_arresp_q);
	if (status != DDI_SUCCESS) {
		mutex_destroy(&async->as_atomic_lookup);
		hci1394_tlist_fini(&async->as_pending_list);
		hci1394_tlabel_fini(&async->as_tlabel);
		hci1394_q_fini(&async->as_atreq_q);
		kmem_free(async, sizeof (hci1394_async_t));
		*async_handle = NULL;
		TNF_PROBE_0(hci1394_async_q_init_fail, HCI1394_TNF_HAL_ERROR,
		    "");
		TNF_PROBE_0_DEBUG(hci1394_async_init_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_FAILURE);
	}

	/* Initialize ARREQ Q */
	qinfo.qi_desc_size = ASYNC_ARREQ_DESC_SIZE;
	qinfo.qi_data_size = ASYNC_ARREQ_DATA_SIZE;
	qinfo.qi_mode = HCI1394_ARQ;
	qinfo.qi_start = hci1394_async_arreq_start;
	qinfo.qi_wake = hci1394_async_arreq_wake;
	qinfo.qi_callback_arg = async;
	status = hci1394_q_init(drvinfo, async->as_ohci, &qinfo,
	    &async->as_arreq_q);
	if (status != DDI_SUCCESS) {
		mutex_destroy(&async->as_atomic_lookup);
		hci1394_tlist_fini(&async->as_pending_list);
		hci1394_tlabel_fini(&async->as_tlabel);
		hci1394_q_fini(&async->as_atreq_q);
		hci1394_q_fini(&async->as_arresp_q);
		kmem_free(async, sizeof (hci1394_async_t));
		*async_handle = NULL;
		TNF_PROBE_0(hci1394_async_q_init_fail, HCI1394_TNF_HAL_ERROR,
		    "");
		TNF_PROBE_0_DEBUG(hci1394_async_init_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_FAILURE);
	}

	/* Initialize ATRESP Q */
	qinfo.qi_desc_size = ASYNC_ATRESP_DESC_SIZE;
	qinfo.qi_data_size = ASYNC_ATRESP_DATA_SIZE;
	qinfo.qi_mode = HCI1394_ATQ;
	qinfo.qi_start = hci1394_async_atresp_start;
	qinfo.qi_wake = hci1394_async_atresp_wake;
	qinfo.qi_callback_arg = async;
	status = hci1394_q_init(drvinfo, async->as_ohci, &qinfo,
	    &async->as_atresp_q);
	if (status != DDI_SUCCESS) {
		mutex_destroy(&async->as_atomic_lookup);
		hci1394_tlist_fini(&async->as_pending_list);
		hci1394_tlabel_fini(&async->as_tlabel);
		hci1394_q_fini(&async->as_atreq_q);
		hci1394_q_fini(&async->as_arresp_q);
		hci1394_q_fini(&async->as_arreq_q);
		kmem_free(async, sizeof (hci1394_async_t));
		*async_handle = NULL;
		TNF_PROBE_0(hci1394_async_q_init_fail, HCI1394_TNF_HAL_ERROR,
		    "");
		TNF_PROBE_0_DEBUG(hci1394_async_init_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_FAILURE);
	}

	*async_handle = async;

	TNF_PROBE_0_DEBUG(hci1394_async_init_exit, HCI1394_TNF_HAL_STACK, "");

	return (DDI_SUCCESS);
}


/*
 * hci1394_async_fini()
 *    Frees up the space allocated in init().  Notice that a pointer to the
 *    handle is used for the parameter.  fini() will set your handle to NULL
 *    before returning.
 */
void
hci1394_async_fini(hci1394_async_handle_t *async_handle)
{
	hci1394_async_t *async;


	ASSERT(async_handle != NULL);
	TNF_PROBE_0_DEBUG(hci1394_async_fini_enter, HCI1394_TNF_HAL_STACK, "");

	async = (hci1394_async_t *)*async_handle;

	mutex_destroy(&async->as_atomic_lookup);
	hci1394_tlabel_fini(&async->as_tlabel);
	hci1394_tlist_fini(&async->as_pending_list);
	hci1394_q_fini(&async->as_atreq_q);
	hci1394_q_fini(&async->as_atresp_q);
	hci1394_q_fini(&async->as_arreq_q);
	hci1394_q_fini(&async->as_arresp_q);

	kmem_free(async, sizeof (hci1394_async_t));

	/* set handle to null.  This helps catch bugs. */
	*async_handle = NULL;

	TNF_PROBE_0_DEBUG(hci1394_async_fini_exit, HCI1394_TNF_HAL_STACK, "");
}


/*
 * hci1394_async_suspend()
 *    The system is getting ready to be suspended.  Make sure that all of
 *    the Q's are clean and that there are no scheduled timeouts in the
 *    pending Q.
 */
void
hci1394_async_suspend(hci1394_async_handle_t async_handle)
{
	ASSERT(async_handle != NULL);
	TNF_PROBE_0_DEBUG(hci1394_async_suspend_enter,
	    HCI1394_TNF_HAL_STACK, "");

	/* Flush out async DMA Q's */
	hci1394_async_flush(async_handle);

	/* Cancel any scheduled pending timeouts */
	hci1394_tlist_timeout_cancel(async_handle->as_pending_list);

	TNF_PROBE_0_DEBUG(hci1394_async_suspend_exit,
	    HCI1394_TNF_HAL_STACK, "");
}


/*
 * hci1394_async_resume()
 *    Re-setup the DMA Q's during a resume after a successful suspend.  The
 *    tlabels will be re-initialized during the bus reset and the pending Q
 *    will have been flushed during the suspend.
 */
int
hci1394_async_resume(hci1394_async_handle_t async_handle)
{
	ASSERT(async_handle != NULL);
	TNF_PROBE_0_DEBUG(hci1394_async_resume_enter,
	    HCI1394_TNF_HAL_STACK, "");

	hci1394_q_resume(async_handle->as_atreq_q);
	hci1394_q_resume(async_handle->as_atresp_q);
	hci1394_q_resume(async_handle->as_arreq_q);
	hci1394_q_resume(async_handle->as_arresp_q);

	TNF_PROBE_0_DEBUG(hci1394_async_resume_exit,
	    HCI1394_TNF_HAL_STACK, "");

	return (DDI_SUCCESS);
}


/*
 * hci1394_async_cmd_overhead()
 *    Return the size of the HAL private area to attach to every alloc'd 1394
 *    framework command.  This allows us to track command state without having
 *    to alloc memory every time a command comes down the pipe.
 */
uint_t
hci1394_async_cmd_overhead()
{
	return (sizeof (hci1394_async_cmd_t));
}
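
/*
 * Illustrative sketch only, not part of the original driver.  It is guarded
 * by a hypothetical HCI1394_ASYNC_EXAMPLES symbol that is never defined, and
 * the hci1394_example_* name is made up.  It assumes the caller of
 * hci1394_async_cmd_overhead() adds the reported overhead onto whatever it
 * allocates per framework command, so the HAL gets its per-command state
 * without a separate allocation on every command.
 */
#ifdef HCI1394_ASYNC_EXAMPLES
static size_t
hci1394_example_cmd_alloc_size(size_t framework_cmd_size)
{
	/* framework command + HAL private area (hci1394_async_cmd_t) */
	return (framework_cmd_size + hci1394_async_cmd_overhead());
}
#endif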


/*
 * hci1394_async_flush()
 *    Flush out the Async Q's and the ATREQ pending list.  This is called on
 *    every bus reset so that we're sync'd up with the HW, and when shutting
 *    down or suspending to make sure we clean up after all commands.
 */
void
hci1394_async_flush(hci1394_async_handle_t async_handle)
{
	ASSERT(async_handle != NULL);
	TNF_PROBE_0_DEBUG(hci1394_async_flush_enter, HCI1394_TNF_HAL_STACK, "");

	hci1394_async_atreq_flush(async_handle);
	hci1394_async_arresp_flush(async_handle);
	hci1394_async_pending_list_flush(async_handle);
	hci1394_async_arreq_flush(async_handle);
	hci1394_async_atresp_flush(async_handle);
	hci1394_tlabel_reset(async_handle->as_tlabel);

	TNF_PROBE_0_DEBUG(hci1394_async_flush_exit, HCI1394_TNF_HAL_STACK, "");
}


/*
 * hci1394_async_pending_timeout_update()
 *    Update the timeout for the pending list.  This updates both the pending
 *    list timeout and the time we wait to reclaim bad tlabels.  The timeout
 *    is passed in nS so we do not have to do any conversions.  This routine
 *    will be called when the CSR split timeout registers are updated.
 */
void
hci1394_async_pending_timeout_update(hci1394_async_handle_t async_handle,
    hrtime_t timeout)
{
	ASSERT(async_handle != NULL);
	TNF_PROBE_0_DEBUG(hci1394_async_pending_timeout_update_enter,
	    HCI1394_TNF_HAL_STACK, "");
	hci1394_tlist_timeout_update(async_handle->as_pending_list, timeout);
	hci1394_tlabel_set_reclaim_time(async_handle->as_tlabel, timeout);
	TNF_PROBE_0_DEBUG(hci1394_async_pending_timeout_update_exit,
	    HCI1394_TNF_HAL_STACK, "");
}


/*
 * hci1394_async_atreq_process()
 *    Process an atreq, if one has completed.  This is called during interrupt
 *    processing and will process a completed atreq.  It returns status
 *    indicating whether an atreq was processed, so that the ISR knows that it
 *    needs to be called again to see if another ATREQ has completed.  flush_q
 *    set to B_TRUE tells this routine to process all commands regardless of
 *    their completion status.  This is used during bus reset processing to
 *    remove all commands from the Q.
 *
 *    There are a few race conditions that we have to watch for in
 *    atreq/arresp.  They all have to do with pended responses, so they are
 *    not applicable in the ARREQ/ATRESP engine (since ATRESP's can't be
 *    pended).
 *
 *    Since the race conditions only exist for pended responses, we will only
 *    talk about that sequence here.  We're also going to simplify the
 *    discussion of what the code does, so it won't exactly match what we say
 *    (e.g. we don't always set up a timeout for every single command, etc.)
 *
 *    After Q'ing up an ATREQ, we will process the result of that command in
 *    one of a couple different paths.  A normal condition would be that we Q
 *    up a command, we get an ATREQ complete interrupt and look at the ATREQ
 *    result.  In the case it has been pended, we set up a timeout to wait for
 *    the response.  If we receive the response before the timeout, the
 *    command is done and we send the response up the chain; if we do not, the
 *    command is done and we send a timeout notification up the chain.
 *
 *    The first race condition is when we get the timeout at the same time as
 *    the response.  At first glance a mutex around the command state would
 *    solve this problem.  But on a multi-processor machine, we may have the
 *    ARRESP interrupt handler (ISR) running on one processor and the timeout
 *    on another.  This means that the command state could change between two
 *    reads while in the ISR.  This means we need to have a little more complex
 *    logic around changing the command state and have to be careful how and
 *    when we do this.
 *
 *    The second race condition is that we could see the ARRESP before we
 *    process the ATREQ.  We could be processing a few ARRESPs from previous
 *    ATREQ's when the ATREQ completes and then the ARRESP comes in.  Since we
 *    are already in the interrupt handler, the ATREQ complete will not preempt
 *    us.
 *
 *    We will never see a race condition between the ATREQ interrupt for a
 *    command and the pending timeout, since the command is not being timed
 *    until this routine is run for that command.
 */
int
hci1394_async_atreq_process(hci1394_async_handle_t async_handle,
    boolean_t flush_q, boolean_t *request_available)
{
	hci1394_async_cmd_t *hcicmd;
	hci1394_q_cmd_t *qcmd;
	int cmd_status;


	ASSERT(async_handle != NULL);
	ASSERT(request_available != NULL);

	TNF_PROBE_0_DEBUG(hci1394_async_atreq_process_enter,
	    HCI1394_TNF_HAL_STACK, "");

	/*
	 * Get the next ATREQ that has completed (if one has).  Space is freed
	 * up in atreq_q and atreq_data_q as part of this function call.
	 */
	hci1394_q_at_next(async_handle->as_atreq_q, flush_q, &qcmd);

	/*
	 * See if there were any more requests on the ATREQ Q.  A NULL means
	 * there were no completed commands left on the Q.
	 */
	if (qcmd == NULL) {
		*request_available = B_FALSE;
		TNF_PROBE_0_DEBUG(hci1394_async_atreq_process_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_SUCCESS);
	}

	/* There is a completed ATREQ, setup the HAL command pointer */
	*request_available = B_TRUE;
	hcicmd = (hci1394_async_cmd_t *)qcmd->qc_arg;

	TNF_PROBE_1_DEBUG(hci1394_atreq_ack, HCI1394_TNF_HAL, "", tnf_uint,
	    atreq_ack, qcmd->qc_status);

	/* save away the command completed timestamp for the services layer */
	hcicmd->ac_priv->ack_tstamp = qcmd->qc_timestamp;

	/*
	 * Make sure this command has not already been processed.  This command
	 * may have already received a response.  If the ACK was not an ACK
	 * pending, we have a HW error (i.e. the target HW sent a response to a
	 * non-pended request).  There is a race condition where the software
	 * will see and complete a response before processing its ACK pending.
	 * This can only happen for ACK pendings.  We have seen both this race
	 * condition and a response to a non-pended request during real-world
	 * testing :-)
	 */
	if (hcicmd->ac_state != HCI1394_CMD_STATE_IN_PROGRESS) {
		/*
		 * We already processed the ARRESP in arresp_process(); it had
		 * better have been ACK pended.  Otherwise the target device
		 * performed an illegal action.
		 */
		if (qcmd->qc_status == OHCI_ACK_PENDING) {
			/*
			 * Tell the source that their command has completed.
			 * We're done with this command.
			 * NOTE: We use ac_status which was set in
			 * process_arresp()
			 */
			h1394_cmd_is_complete(
			    async_handle->as_drvinfo->di_sl_private,
			    hcicmd->ac_cmd, H1394_AT_REQ,
			    hcicmd->ac_status);
			TNF_PROBE_0_DEBUG(hci1394_async_atreq_process_exit,
			    HCI1394_TNF_HAL_STACK, "");
			return (DDI_SUCCESS);
		/*
		 * This is a HW error.  Process the ACK like we never saw the
		 * response.  We will do this below.
		 */
		} else {
			TNF_PROBE_1(hci1394_async_ack_fail,
			    HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg,
			    "response sent to non-pended ack");
		}
	}

	/*
	 * if we got an ack pending, add it to the pending list and leave.  We
	 * will either get an ARRESP or the pending list will timeout the
	 * response.
	 */
	if (qcmd->qc_status == OHCI_ACK_PENDING) {
		hcicmd->ac_state = HCI1394_CMD_STATE_PENDING;
		/* Add this command to the pending list */
		hcicmd->ac_plist_node.tln_addr = hcicmd;
		hci1394_tlist_add(async_handle->as_pending_list,
		    &hcicmd->ac_plist_node);
		TNF_PROBE_0_DEBUG(hci1394_async_atreq_process_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_SUCCESS);
	}

	/*
	 * setup our return command status based on the ACK from the HW.  See
	 * the OpenHCI 1.0 spec (table 3.2 on pg. 18) for more information
	 * about these ACK/EVT's.
	 */
	switch (qcmd->qc_status) {
	case OHCI_ACK_COMPLETE:
		cmd_status = H1394_CMD_SUCCESS;
		break;

	/*
	 * we can get a nostatus during a bus reset (i.e. we shutdown the AT
	 * engine before it flushed all the commands)
	 */
	case OHCI_EVT_FLUSHED:
	case OHCI_EVT_NO_STATUS:
		cmd_status = H1394_CMD_EBUSRESET;
		break;

	case OHCI_EVT_MISSING_ACK:
	case OHCI_EVT_TIMEOUT:
		TNF_PROBE_3(hci1394_atreq_ack_err, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, nodeid,
		    IEEE1394_NODE_NUM(hcicmd->ac_tlabel.tbi_destination),
		    tnf_uint, tx_tlabel, hcicmd->ac_tlabel.tbi_tlabel,
		    tnf_uint, atreq_ack, qcmd->qc_status);
		cmd_status = H1394_CMD_ETIMEOUT;
		break;

	case OHCI_ACK_BUSY_X:
	case OHCI_ACK_BUSY_A:
	case OHCI_ACK_BUSY_B:
		cmd_status = H1394_CMD_EDEVICE_BUSY;
		TNF_PROBE_3(hci1394_atreq_ack_err, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, nodeid,
		    IEEE1394_NODE_NUM(hcicmd->ac_tlabel.tbi_destination),
		    tnf_uint, tx_tlabel, hcicmd->ac_tlabel.tbi_tlabel,
		    tnf_uint, atreq_ack, qcmd->qc_status);
		break;

	case OHCI_ACK_TARDY:
		cmd_status = H1394_CMD_EDEVICE_POWERUP;
		TNF_PROBE_3(hci1394_atreq_ack_err, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, nodeid,
		    IEEE1394_NODE_NUM(hcicmd->ac_tlabel.tbi_destination),
		    tnf_uint, tx_tlabel, hcicmd->ac_tlabel.tbi_tlabel,
		    tnf_uint, atreq_ack, qcmd->qc_status);
		break;

	case OHCI_ACK_DATA_ERROR:
		cmd_status = H1394_CMD_EDATA_ERROR;
		TNF_PROBE_3(hci1394_atreq_ack_err, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, nodeid,
		    IEEE1394_NODE_NUM(hcicmd->ac_tlabel.tbi_destination),
		    tnf_uint, tx_tlabel, hcicmd->ac_tlabel.tbi_tlabel,
		    tnf_uint, atreq_ack, qcmd->qc_status);
		break;

	case OHCI_ACK_TYPE_ERROR:
		cmd_status = H1394_CMD_ETYPE_ERROR;
		TNF_PROBE_3(hci1394_atreq_ack_err, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, nodeid,
		    IEEE1394_NODE_NUM(hcicmd->ac_tlabel.tbi_destination),
		    tnf_uint, tx_tlabel, hcicmd->ac_tlabel.tbi_tlabel,
		    tnf_uint, atreq_ack, qcmd->qc_status);
		break;

	case OHCI_ACK_CONFLICT_ERROR:
		cmd_status = H1394_CMD_ERSRC_CONFLICT;
		TNF_PROBE_3(hci1394_atreq_ack_err, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, nodeid,
		    IEEE1394_NODE_NUM(hcicmd->ac_tlabel.tbi_destination),
		    tnf_uint, tx_tlabel, hcicmd->ac_tlabel.tbi_tlabel,
		    tnf_uint, atreq_ack, qcmd->qc_status);
		break;

	case OHCI_ACK_ADDRESS_ERROR:
		cmd_status = H1394_CMD_EADDR_ERROR;
		TNF_PROBE_3(hci1394_atreq_ack_err, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, nodeid,
		    IEEE1394_NODE_NUM(hcicmd->ac_tlabel.tbi_destination),
		    tnf_uint, tx_tlabel, hcicmd->ac_tlabel.tbi_tlabel,
		    tnf_uint, atreq_ack, qcmd->qc_status);
		break;

	case OHCI_EVT_UNDERRUN:
	case OHCI_EVT_DATA_READ:
	case OHCI_EVT_TCODE_ERR:
	case OHCI_EVT_DESCRIPTOR_READ:
	case OHCI_EVT_UNKNOWN:
	default:
		cmd_status = H1394_CMD_EUNKNOWN_ERROR;
		TNF_PROBE_3(hci1394_atreq_ack_err, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, nodeid,
		    IEEE1394_NODE_NUM(hcicmd->ac_tlabel.tbi_destination),
		    tnf_uint, tx_tlabel, hcicmd->ac_tlabel.tbi_tlabel,
		    tnf_uint, atreq_ack, qcmd->qc_status);
		break;
	}

	/*
	 * Free the tlabel that was used for this transfer.  We will not try to
	 * free the tlabel in the case that we already received a response or
	 * if we did not allocate one (PHY packet).  If we already received a
	 * response, the tlabel would have been freed in
	 * hci1394_async_arresp_process().
	 */
	if ((hcicmd->ac_state == HCI1394_CMD_STATE_IN_PROGRESS) &&
	    (hcicmd->ac_tlabel_alloc == B_TRUE)) {
		hci1394_tlabel_free(async_handle->as_tlabel,
		    &hcicmd->ac_tlabel);
	}

	/*
	 * If we got anything other than an ACK pending, we are done with this
	 * transaction.
	 */
	hcicmd->ac_state = HCI1394_CMD_STATE_COMPLETED;

	/* tell the services layer that the command has completed */
	h1394_cmd_is_complete(async_handle->as_drvinfo->di_sl_private,
	    hcicmd->ac_cmd, H1394_AT_REQ, cmd_status);

	TNF_PROBE_0_DEBUG(hci1394_async_atreq_process_exit,
	    HCI1394_TNF_HAL_STACK, "");

	return (DDI_SUCCESS);
}
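
/*
 * Illustrative sketch only, not part of the original driver.  It is guarded
 * by a hypothetical HCI1394_ASYNC_EXAMPLES symbol that is never defined, and
 * the hci1394_example_* name is made up.  The process routines in this file
 * report through their boolean out parameter whether they consumed anything,
 * so an interrupt handler is expected to call them in a loop until nothing
 * more is available.  A minimal loop for the ATREQ engine might look like
 * this.
 */
#ifdef HCI1394_ASYNC_EXAMPLES
static void
hci1394_example_atreq_isr_loop(hci1394_async_handle_t async_handle)
{
	boolean_t request_available;

	do {
		/* not flushing (B_FALSE); only completed ATREQs are handled */
		(void) hci1394_async_atreq_process(async_handle, B_FALSE,
		    &request_available);
	} while (request_available == B_TRUE);
}
#endif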


/*
 * hci1394_async_arresp_process()
 *    Process an arresp, if one has completed.  This is called during
 *    interrupt processing and will process a completed arresp.  It returns
 *    status indicating whether an arresp was processed, so that the ISR knows
 *    that it needs to be called again to see if another ARRESP has completed.
 */
int
hci1394_async_arresp_process(hci1394_async_handle_t async_handle,
    boolean_t *response_available)
{
	hci1394_async_cmd_t *hcicmd;
	uint32_t *addr;
	int cmd_status;
	uint_t tcode;
	uint_t size;
	int status;


	ASSERT(async_handle != NULL);
	ASSERT(response_available != NULL);

	TNF_PROBE_0_DEBUG(hci1394_async_arresp_process_enter,
	    HCI1394_TNF_HAL_STACK, "");

	/*
	 * See if there were any responses on the ARRESP Q.  A NULL means there
	 * were no responses on the Q.  This call does NOT free up space.  We
	 * need to do that later, after we figure out how much space the
	 * response takes up.
	 */
	hci1394_q_ar_next(async_handle->as_arresp_q, &addr);
	if (addr == NULL) {
		*response_available = B_FALSE;
		TNF_PROBE_0_DEBUG(hci1394_async_arresp_process_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_SUCCESS);
	}

	/*
	 * We got a response.  Lock out the pending timeout callback from
	 * marking the tlabel bad.
	 */
	*response_available = B_TRUE;
	mutex_enter(&async_handle->as_atomic_lookup);

	/*
	 * Read the response into the 1394 framework command.  We could get a
	 * NULL for a command if we got a response with an error (i.e. a tlabel
	 * that didn't match a request).  This would be a successful read but
	 * with a NULL hcicmd returned.  If we ever get a DDI_FAILURE, we will
	 * shutdown.
	 */
	status = hci1394_async_arresp_read(async_handle,
	    (hci1394_basic_pkt_t *)addr, &tcode, &hcicmd, &size);
	if (status != DDI_SUCCESS) {
		mutex_exit(&async_handle->as_atomic_lookup);
		h1394_error_detected(async_handle->as_drvinfo->di_sl_private,
		    H1394_SELF_INITIATED_SHUTDOWN, NULL);
		cmn_err(CE_WARN, "hci1394(%d): driver shutdown: "
		    "unrecoverable error interrupt detected",
		    async_handle->as_drvinfo->di_instance);
		hci1394_shutdown(async_handle->as_drvinfo->di_dip);
		TNF_PROBE_0(hci1394_async_arresp_read_fail,
		    HCI1394_TNF_HAL_ERROR, "");
		TNF_PROBE_0_DEBUG(hci1394_async_arresp_process_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_FAILURE);
	}

	/* Free up the arresp Q space, we are done with the data */
	hci1394_q_ar_free(async_handle->as_arresp_q, size);

	/*
	 * If we did not get a valid command response (i.e. we got a bad tlabel
	 * or something like that) we don't have anything else to do.  We will
	 * say that we processed a response and will return successfully.  We
	 * still may have other responses on the Q.
	 */
	if (hcicmd == NULL) {
		mutex_exit(&async_handle->as_atomic_lookup);
		TNF_PROBE_0_DEBUG(hci1394_async_arresp_process_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_SUCCESS);
	}

	TNF_PROBE_1_DEBUG(hci1394_arresp_resp, HCI1394_TNF_HAL, "", tnf_uint,
	    atresp_resp, hcicmd->ac_status);

	/*
	 * Make sure this is in the pending list.  There is a small chance that
	 * we will see the response before we see the ACK PENDING.  In the
	 * expected case, it is in the pending list.  We will remove it since
	 * we are done with the command.
	 *
	 * NOTE: there is a race condition here with the pending timeout.  Look
	 * at the comments before hci1394_async_atreq_process() for more info.
	 */
	if (hcicmd->ac_state == HCI1394_CMD_STATE_PENDING) {
		/* remove this transfer from the pending list */
		status = hci1394_tlist_delete(async_handle->as_pending_list,
		    &hcicmd->ac_plist_node);
		if (status != DDI_SUCCESS) {
			mutex_exit(&async_handle->as_atomic_lookup);
			TNF_PROBE_0_DEBUG(hci1394_async_arresp_process_exit,
			    HCI1394_TNF_HAL_STACK, "");
			return (DDI_SUCCESS);
		}
	}

	/* allow the pending timeout callback to mark the tlabel as bad */
	mutex_exit(&async_handle->as_atomic_lookup);

	/*
	 * We got a valid response that we were able to read in.  Free the
	 * tlabel that was used for this transfer.
	 */
	hci1394_tlabel_free(async_handle->as_tlabel, &hcicmd->ac_tlabel);

	/*
	 * Setup our return command status based on the RESP, ACK, or SW error.
	 * See the IEEE1394-1995 spec (6.2.4.10 on pg. 159) for more
	 * information on response codes.  See the OpenHCI 1.0 spec (table 3.2
	 * on pg. 18) for more information about ACK/EVT's.  ac_status could
	 * have an IEEE1394 response in it, a 1394 EVT/ACK, or a special
	 * cmd1394 error for a device error caught in SW (e.g. for a block read
	 * request that got a quadlet read response).  We use a special mask to
	 * separate the ACK/EVT's from the responses (ASYNC_ARRESP_ACK_ERROR).
	 */
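	/*
	 * Since the OHCI ACK/EVT codes and the IEEE1394 response codes are
	 * both small integers, OR'ing ASYNC_ARRESP_ACK_ERROR (0x8000) into an
	 * ACK/EVT before it is saved in ac_status keeps the two name spaces
	 * disjoint.  That is what the "| ASYNC_ARRESP_ACK_ERROR" case labels
	 * below rely on to tell an ACK error apart from a response code.
	 */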
	switch (hcicmd->ac_status) {
	case IEEE1394_RESP_COMPLETE:
		cmd_status = H1394_CMD_SUCCESS;
		break;
	case IEEE1394_RESP_DATA_ERROR:
		cmd_status = H1394_CMD_EDATA_ERROR;
		break;
	case IEEE1394_RESP_TYPE_ERROR:
		cmd_status = H1394_CMD_ETYPE_ERROR;
		break;
	case IEEE1394_RESP_CONFLICT_ERROR:
		cmd_status = H1394_CMD_ERSRC_CONFLICT;
		break;
	case IEEE1394_RESP_ADDRESS_ERROR:
		cmd_status = H1394_CMD_EADDR_ERROR;
		break;
	case H1394_CMD_EDEVICE_ERROR:
		cmd_status = H1394_CMD_EDEVICE_ERROR;
		break;
	case OHCI_ACK_DATA_ERROR | ASYNC_ARRESP_ACK_ERROR:
		cmd_status = H1394_CMD_EDATA_ERROR;
		break;
	case OHCI_ACK_TYPE_ERROR | ASYNC_ARRESP_ACK_ERROR:
		cmd_status = H1394_CMD_ETYPE_ERROR;
		break;
	case OHCI_EVT_UNDERRUN | ASYNC_ARRESP_ACK_ERROR:
	case OHCI_EVT_DATA_READ | ASYNC_ARRESP_ACK_ERROR:
	case OHCI_EVT_TCODE_ERR | ASYNC_ARRESP_ACK_ERROR:
		cmd_status = H1394_CMD_EUNKNOWN_ERROR;
		break;
	default:
		cmd_status = H1394_CMD_EUNKNOWN_ERROR;
		TNF_PROBE_1(hci1394_async_ack_err, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, arresp_resp, hcicmd->ac_status);
		break;
	}

	/*
	 * If we have already processed the atreq and put it on the pending Q
	 * (the normal case), tell the services layer it completed.
	 */
	if (hcicmd->ac_state == HCI1394_CMD_STATE_PENDING) {
		/* Set state indicating that we are done with this cmd */
		hcicmd->ac_state = HCI1394_CMD_STATE_COMPLETED;

		/* tell the services layer the command has completed */
		h1394_cmd_is_complete(async_handle->as_drvinfo->di_sl_private,
		    hcicmd->ac_cmd, H1394_AT_REQ, cmd_status);

	/*
	 * We have not seen the atreq status yet.  We will call
	 * h1394_cmd_is_complete() in atreq_process() in case we did not get
	 * an ack pending (target HW error -> this is based on real world
	 * experience :-))
	 */
	} else {
		/* Set state indicating that we are done with this cmd */
		hcicmd->ac_state = HCI1394_CMD_STATE_COMPLETED;

		/* save away the status for atreq_process() */
		hcicmd->ac_status = cmd_status;
	}

	TNF_PROBE_0_DEBUG(hci1394_async_arresp_process_exit,
	    HCI1394_TNF_HAL_STACK, "");

	return (DDI_SUCCESS);
}


/*
 * hci1394_async_arreq_process()
 *    Process an arreq, if one has arrived.  This is called during interrupt
 *    processing and will process an arreq that has arrived.  It returns
 *    status indicating whether an arreq was processed, so that the ISR knows
 *    that it needs to be called again to see if another ARREQ has arrived.
 */
int
hci1394_async_arreq_process(hci1394_async_handle_t async_handle,
    boolean_t *request_available)
{
	hci1394_async_cmd_t *hcicmd;
	uint32_t *addr;
	uint_t tcode;
	uint_t size;
	int status;


	ASSERT(async_handle != NULL);
	ASSERT(request_available != NULL);

	TNF_PROBE_0_DEBUG(hci1394_async_arreq_process_enter,
	    HCI1394_TNF_HAL_STACK, "");

	/*
	 * See if there were any requests on the ARREQ Q.  A NULL means there
	 * were no requests on the Q.  This call does NOT free up space.  We
	 * need to do that later, after we figure out how much space the
	 * request takes up.
	 */
	hci1394_q_ar_next(async_handle->as_arreq_q, &addr);
	if (addr == NULL) {
		*request_available = B_FALSE;
		TNF_PROBE_0_DEBUG(hci1394_async_arreq_process_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_SUCCESS);
	}

	/*
	 * We got a request.  Read the request into a 1394 framework command.
	 * We could get a NULL for a command if we got a request with an error
	 * (i.e. the ARREQ ACK was not ack pending or ack complete).  This
	 * would be a successful read but with a NULL hcicmd returned.  If we
	 * ever get a DDI_FAILURE, we will shutdown.
	 */
	*request_available = B_TRUE;
	status = hci1394_async_arreq_read(async_handle,
	    (hci1394_basic_pkt_t *)addr, &tcode, &hcicmd, &size);
	if (status != DDI_SUCCESS) {
		h1394_error_detected(async_handle->as_drvinfo->di_sl_private,
		    H1394_SELF_INITIATED_SHUTDOWN, NULL);
		cmn_err(CE_WARN, "hci1394(%d): driver shutdown: "
		    "unrecoverable error interrupt detected",
		    async_handle->as_drvinfo->di_instance);
		hci1394_shutdown(async_handle->as_drvinfo->di_dip);
		TNF_PROBE_0(hci1394_async_arreq_read_fail,
		    HCI1394_TNF_HAL_ERROR, "");
		TNF_PROBE_0_DEBUG(hci1394_async_arreq_process_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_FAILURE);
	}

	/* Free up the arreq Q space, we are done with the data */
	hci1394_q_ar_free(async_handle->as_arreq_q, size);

	/*
	 * If we did not get a valid request (i.e. the ARREQ had a bad ACK
	 * or something like that) we don't have anything else to do.  We will
	 * say that we processed a request and will return successfully.  We
	 * still may have other requests on the Q.
	 */
	if (hcicmd == NULL) {
		TNF_PROBE_0_DEBUG(hci1394_async_arreq_process_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_SUCCESS);
	}

	/*
	 * If as_flushing_arreq is set, we do not want to send any requests up
	 * to the Services Layer.  We are flushing the ARREQ until we see a bus
	 * reset token that matches the current bus generation.  Free up the
	 * alloc'd command and return success.
	 */
	if (async_handle->as_flushing_arreq == B_TRUE) {
		hci1394_async_response_complete(async_handle, hcicmd->ac_cmd,
		    hcicmd->ac_priv);
		TNF_PROBE_0_DEBUG(hci1394_async_arreq_process_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_SUCCESS);
	}

	TNF_PROBE_1_DEBUG(hci1394_arreq_ack, HCI1394_TNF_HAL, "", tnf_uint,
	    arreq_ack, hcicmd->ac_status);

	/*
	 * We got a valid request that we were able to read in.  Call into the
	 * services layer based on the type of request.
	 */
	switch (tcode) {
	case IEEE1394_TCODE_READ_QUADLET:
	case IEEE1394_TCODE_READ_BLOCK:
		h1394_read_request(async_handle->as_drvinfo->di_sl_private,
		    hcicmd->ac_cmd);
		break;
	case IEEE1394_TCODE_WRITE_QUADLET:
	case IEEE1394_TCODE_WRITE_BLOCK:
		h1394_write_request(async_handle->as_drvinfo->di_sl_private,
		    hcicmd->ac_cmd);
		break;
	case IEEE1394_TCODE_LOCK:
		h1394_lock_request(async_handle->as_drvinfo->di_sl_private,
		    hcicmd->ac_cmd);
		break;
	case IEEE1394_TCODE_PHY:
		/*
		 * OpenHCI only handles 1 PHY quadlet at a time.  If a selfid
		 * packet was received with multiple quadlets, we will treat
		 * each quadlet as a separate call.  We do not notify the
		 * services layer through the normal command interface; we will
		 * treat it like a command internally and then free up the
		 * command ourselves when we are done with it.
		 */
		h1394_phy_packet(async_handle->as_drvinfo->di_sl_private,
		    &hcicmd->ac_cmd->cmd_u.q.quadlet_data, 1,
		    hcicmd->ac_priv->recv_tstamp);
		/* free alloc'd command */
		hci1394_async_response_complete(async_handle, hcicmd->ac_cmd,
		    hcicmd->ac_priv);
		break;
	default:
		/* free alloc'd command */
		hci1394_async_response_complete(async_handle, hcicmd->ac_cmd,
		    hcicmd->ac_priv);
		TNF_PROBE_1(hci1394_async_arreq_tcode_err,
		    HCI1394_TNF_HAL_ERROR, "", tnf_uint, arreq_tcode, tcode);
		break;
	}

	TNF_PROBE_0_DEBUG(hci1394_async_arreq_process_exit,
	    HCI1394_TNF_HAL_STACK, "");

	return (DDI_SUCCESS);
}


/*
 * hci1394_async_atresp_process()
 *    Process an atresp, if one has completed.  This is called during
 *    interrupt processing and will process a completed atresp.  It returns
 *    status indicating whether an atresp was processed, so that the ISR knows
 *    that it needs to be called again to see if another ATRESP has completed.
 *    flush_q set to B_TRUE tells this routine to process all commands
 *    regardless of their completion status.  This is used during bus reset
 *    processing to remove all commands from the Q.
 */
int
hci1394_async_atresp_process(hci1394_async_handle_t async_handle,
    boolean_t flush_q, boolean_t *response_available)
{
	hci1394_async_cmd_t *hcicmd;
	hci1394_q_cmd_t *qcmd;
	int cmd_status;


	ASSERT(async_handle != NULL);
	ASSERT(response_available != NULL);

	TNF_PROBE_0_DEBUG(hci1394_async_atresp_process_enter,
	    HCI1394_TNF_HAL_STACK, "");

	/*
	 * Get the next ATRESP that has completed (if one has).  Space is freed
	 * up in atresp_q and atresp_data_q as part of this function call.
	 */
	hci1394_q_at_next(async_handle->as_atresp_q, flush_q, &qcmd);

	/*
	 * See if there were any more responses on the ATRESP Q.  A NULL means
	 * there were no completed commands left on the Q.
	 */
	if (qcmd == NULL) {
		*response_available = B_FALSE;
		TNF_PROBE_0_DEBUG(hci1394_async_atresp_process_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_SUCCESS);
	}

	/* There is a completed ATRESP, setup the HAL command pointer */
	*response_available = B_TRUE;
	hcicmd = (hci1394_async_cmd_t *)qcmd->qc_arg;

	TNF_PROBE_1_DEBUG(hci1394_atresp_ack, HCI1394_TNF_HAL, "", tnf_uint,
	    atresp_ack, qcmd->qc_status);

	/* save away the command completed timestamp for the services layer */
	hcicmd->ac_priv->ack_tstamp = qcmd->qc_timestamp;

	/*
	 * Setup our return command status based on the ACK from the HW.  See
	 * the OpenHCI 1.0 spec (table 3.2 on pg. 18) for more information
	 * about these ACK/EVT's.
	 */
	switch (qcmd->qc_status) {
	case OHCI_ACK_COMPLETE:
		cmd_status = H1394_CMD_SUCCESS;
		break;

	/*
	 * We can get a nostatus during a bus reset (i.e. we shut down the AT
	 * engine before it flushed all the commands).
	 */
	case OHCI_EVT_FLUSHED:
	case OHCI_EVT_NO_STATUS:
		cmd_status = H1394_CMD_EBUSRESET;
		break;

	case OHCI_EVT_MISSING_ACK:
	case OHCI_EVT_TIMEOUT:
		cmd_status = H1394_CMD_ETIMEOUT;
		TNF_PROBE_1(hci1394_atresp_ack_err, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, atresp_ack, qcmd->qc_status);
		break;

	case OHCI_ACK_BUSY_X:
	case OHCI_ACK_BUSY_A:
	case OHCI_ACK_BUSY_B:
		cmd_status = H1394_CMD_EDEVICE_BUSY;
		TNF_PROBE_1(hci1394_atresp_ack_err, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, atresp_ack, qcmd->qc_status);
		break;

	case OHCI_ACK_TARDY:
		cmd_status = H1394_CMD_EDEVICE_POWERUP;
		TNF_PROBE_1(hci1394_atresp_ack_err, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, atresp_ack, qcmd->qc_status);
		break;

	case OHCI_ACK_DATA_ERROR:
		cmd_status = H1394_CMD_EDATA_ERROR;
		TNF_PROBE_1(hci1394_atresp_ack_err, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, atresp_ack, qcmd->qc_status);
		break;

	case OHCI_ACK_TYPE_ERROR:
		cmd_status = H1394_CMD_ETYPE_ERROR;
		TNF_PROBE_1(hci1394_atresp_ack_err, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, atresp_ack, qcmd->qc_status);
		break;

	case OHCI_ACK_CONFLICT_ERROR:
		cmd_status = H1394_CMD_ERSRC_CONFLICT;
		TNF_PROBE_1(hci1394_atresp_ack_err, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, atresp_ack, qcmd->qc_status);
		break;

	case OHCI_ACK_ADDRESS_ERROR:
		cmd_status = H1394_CMD_EADDR_ERROR;
		TNF_PROBE_1(hci1394_atresp_ack_err, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, atresp_ack, qcmd->qc_status);
		break;

	case OHCI_EVT_UNKNOWN:
		cmd_status = H1394_CMD_EUNKNOWN_ERROR;
		TNF_PROBE_1(hci1394_atresp_ack_err, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, atresp_ack, qcmd->qc_status);
		break;

	case OHCI_EVT_UNDERRUN:
	case OHCI_EVT_DATA_READ:
	case OHCI_EVT_TCODE_ERR:
	case OHCI_EVT_DESCRIPTOR_READ:
	default:
		cmd_status = H1394_CMD_EUNKNOWN_ERROR;
		TNF_PROBE_1(hci1394_atresp_ack_err, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, atresp_ack, qcmd->qc_status);
		break;
	}

	/* tell the services layer that the command has completed */
	h1394_cmd_is_complete(async_handle->as_drvinfo->di_sl_private,
	    hcicmd->ac_cmd, H1394_AT_RESP, cmd_status);

	TNF_PROBE_0_DEBUG(hci1394_async_atresp_process_exit,
	    HCI1394_TNF_HAL_STACK, "");

	return (DDI_SUCCESS);
}


/*
 * hci1394_async_arresp_read()
 *    Read an ARRESP from memory into a 1394 Framework command.  We read the
 *    tcode, which tells us which kind of arresp the packet is, get the size
 *    of the response, read in the sender, tlabel, and response code, and then
 *    look up the command based on the sender and tlabel.  Once we get the
 *    command (corresponding to the ATREQ), we will copy the rest of the
 *    response into that command.
 *
 *    The only time this routine should return DDI_FAILURE is if it was unable
 *    to maintain a good state in the ARRESP Q (i.e. an unknown response was
 *    received and we can not cleanup after it).  If we detect a recoverable
 *    error, and it doesn't make sense to pass the response up to the Services
 *    Layer, we should return DDI_SUCCESS with hcicmd = NULL.
 */
static int
hci1394_async_arresp_read(hci1394_async_handle_t async_handle,
    hci1394_basic_pkt_t *pkt, uint_t *tcode, hci1394_async_cmd_t **hcicmd,
    uint_t *size)
{
	hci1394_tlabel_info_t ac_tlabel;
	h1394_cmd_priv_t *cmd_priv;
	cmd1394_cmd_t *cmd;
	uint32_t *status_addr;
	uint_t data_length;
	uint32_t quadlet;
	void *command;
	uint_t rcode;
	uint_t ack;
	int status;


	ASSERT(async_handle != NULL);
	ASSERT(pkt != NULL);
	ASSERT(tcode != NULL);
	ASSERT(hcicmd != NULL);
	ASSERT(size != NULL);

	TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_enter,
	    HCI1394_TNF_HAL_STACK, "");

	/* read in the arresp tcode */
	quadlet = hci1394_q_ar_get32(async_handle->as_arresp_q, &pkt->q1);
	*tcode = HCI1394_DESC_TCODE_GET(quadlet);

	/* Get the size of the arresp */
	status = hci1394_async_arresp_size_get(*tcode,
	    async_handle->as_arresp_q, &pkt->q1, size);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_0(hci1394_async_arresp_read_size_fail,
		    HCI1394_TNF_HAL_ERROR, "");
		TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_FAILURE);
	}

	/* Read in the tlabel, destination, and rcode (response code) */
	quadlet = hci1394_q_ar_get32(async_handle->as_arresp_q, &pkt->q1);
	ac_tlabel.tbi_tlabel = HCI1394_DESC_TLABEL_GET(quadlet);
	quadlet = hci1394_q_ar_get32(async_handle->as_arresp_q, &pkt->q2);
	ac_tlabel.tbi_destination = HCI1394_DESC_DESTID_GET(quadlet);
	rcode = HCI1394_DESC_RCODE_GET(quadlet);

	/* Lookup the ATREQ framework command this response goes with */
	hci1394_tlabel_lookup(async_handle->as_tlabel, &ac_tlabel, &command);

	/*
	 * If there is not a corresponding ATREQ command, this is an error.  We
	 * will ignore this response but still return success so we cleanup
	 * after it and go on with other arresp's.  This could happen if a
	 * response was sent after the command has timed out or if the target
	 * device is misbehaving (we have seen both cases).
	 */
	*hcicmd = (hci1394_async_cmd_t *)command;
	if ((*hcicmd) == NULL) {
		TNF_PROBE_2(hci1394_invalid_tlabel, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, nodeid,
		    IEEE1394_NODE_NUM(ac_tlabel.tbi_destination), tnf_uint,
		    rx_tlabel, ac_tlabel.tbi_tlabel);
		TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_SUCCESS);
	}

	/*
	 * Copy the response code into the HAL private command space.  Setup
	 * shortcuts to the 1394 framework command (cmd) and the HAL/SL private
	 * area (cmd_priv).  A command is made up of 4 parts.  There is the
	 * public part which is accessible to the target driver, there is the
	 * Services Layer private part which is only accessible to the services
	 * layer, there is the SL/HAL private area which is where the SL and
	 * HAL share information about a particular command, and there is the
	 * HAL private area where we keep track of our command specific state
	 * information.
	 */
	(*hcicmd)->ac_status = rcode;
	cmd = (*hcicmd)->ac_cmd;
	cmd_priv = (*hcicmd)->ac_priv;

	/*
	 * Calculate the address where the status of the ARRESP and the
	 * timestamp are kept.  It is the last quadlet in the response.  Save
	 * away the timestamp.
	 */
	status_addr = (uint32_t *)((uintptr_t)pkt + (uintptr_t)*size -
	    (uintptr_t)IEEE1394_QUADLET);
	quadlet = hci1394_q_ar_get32(async_handle->as_arresp_q, status_addr);
	cmd_priv->recv_tstamp = HCI1394_DESC_TIMESTAMP_GET(quadlet);

	/*
	 * If we did not get an ACK_COMPLETE, we will use the ack error instead
	 * of the response in the packet for our status.  We use a special mask
	 * to separate the responses from the ACKs (ASYNC_ARRESP_ACK_ERROR).
	 * We will return success with hcicmd set to the command so that this
	 * error gets sent up to the Services Layer.
	 */
	ack = HCI1394_DESC_EVT_GET(quadlet);
	if (ack != OHCI_ACK_COMPLETE) {
		/* use the ack error instead of rcode for the command status */
		(*hcicmd)->ac_status = ack | ASYNC_ARRESP_ACK_ERROR;
		TNF_PROBE_1(hci1394_arresp_bad_ack, HCI1394_TNF_HAL_ERROR,
		    "", tnf_uint, arresp_ack, ack);
		TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_SUCCESS);
	}

	TNF_PROBE_1_DEBUG(hci1394_atrresp_resp, HCI1394_TNF_HAL, "", tnf_uint,
	    arresp_resp, rcode);

	/*
	 * If we get to this point we have gotten a valid ACK on the response
	 * and have matched up the response with an ATREQ.  Now we check the
	 * response code.  If it is not resp_complete, we do not have anything
	 * left to look at in the response.  Return successfully.
	 */
	if (rcode != IEEE1394_RESP_COMPLETE) {
		TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_SUCCESS);
	}

	/*
	 * Read the rest of the response (based on which kind of response it
	 * is) into the 1394 framework command.  In all of the different
	 * responses, we check to make sure the response matches the original
	 * request.  We originally did not have this check but found a device
	 * or two which did not behave very well and would cause us to corrupt
	 * our commands.  Now we check :-)  We will return success when we get
	 * this error since we can recover from it.
	 */
	switch (*tcode) {
	case IEEE1394_TCODE_WRITE_RESP:
		/*
		 * make sure the ATREQ was a quadlet/block write.  The same
		 * response is sent back for those two types of ATREQs.
		 */
		if ((cmd->cmd_type != CMD1394_ASYNCH_WR_QUAD) &&
		    (cmd->cmd_type != CMD1394_ASYNCH_WR_BLOCK)) {
			(*hcicmd)->ac_status = H1394_CMD_EDEVICE_ERROR;
			TNF_PROBE_2(hci1394_async_arresp_lockresp_fail,
			    HCI1394_TNF_HAL_STACK, "", tnf_string, errmsg,
			    "Invalid response sent for write request", tnf_uint,
			    arresp_tcode, *tcode);
			TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
			    HCI1394_TNF_HAL_STACK, "");
			return (DDI_SUCCESS);
		}
		break;

	case IEEE1394_TCODE_READ_QUADLET_RESP:
		/* make sure the ATREQ was a quadlet read */
		if (cmd->cmd_type != CMD1394_ASYNCH_RD_QUAD) {
			(*hcicmd)->ac_status = H1394_CMD_EDEVICE_ERROR;
			TNF_PROBE_2(hci1394_async_arresp_lockresp_fail,
			    HCI1394_TNF_HAL_STACK, "", tnf_string, errmsg,
			    "Invalid response sent for qrd request", tnf_uint,
			    arresp_tcode, *tcode);
			TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
			    HCI1394_TNF_HAL_STACK, "");
			return (DDI_SUCCESS);
		}

		/*
		 * Read the quadlet read response in.  Data is treated as a
		 * byte stream.
		 */
		hci1394_q_ar_rep_get8(async_handle->as_arresp_q,
		    (uint8_t *)&cmd->cmd_u.q.quadlet_data,
		    (uint8_t *)&pkt->q4, IEEE1394_QUADLET);
		break;

	case IEEE1394_TCODE_READ_BLOCK_RESP:
		/* make sure the ATREQ was a block read */
		if (cmd->cmd_type != CMD1394_ASYNCH_RD_BLOCK) {
			(*hcicmd)->ac_status = H1394_CMD_EDEVICE_ERROR;
			TNF_PROBE_2(hci1394_async_arresp_lockresp_fail,
			    HCI1394_TNF_HAL_STACK, "", tnf_string, errmsg,
			    "Invalid response sent for brd request", tnf_uint,
			    arresp_tcode, *tcode);
			TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
			    HCI1394_TNF_HAL_STACK, "");
			return (DDI_SUCCESS);
		}

		/*
		 * Read in the data length.  Make sure the data length is the
		 * same size as the read block request size that went out.
		 */
		quadlet = hci1394_q_ar_get32(async_handle->as_arresp_q,
		    &pkt->q4);
		data_length = HCI1394_DESC_DATALEN_GET(quadlet);
		if (data_length != cmd_priv->mblk.length) {
			(*hcicmd)->ac_status = H1394_CMD_EDEVICE_ERROR;
			TNF_PROBE_3(hci1394_async_arresp_brdsz_fail,
			    HCI1394_TNF_HAL_STACK, "", tnf_string,
			    errmsg, "Block read response size is bad",
			    tnf_uint, requested_size, cmd_priv->mblk.length,
			    tnf_uint, response_size, data_length);
			TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
			    HCI1394_TNF_HAL_STACK, "");
			return (DDI_SUCCESS);
		}

		/* Copy the read block data into the command mblk */
		hci1394_q_ar_copy_to_mblk(async_handle->as_arresp_q,
		    (uint8_t *)&pkt->q5, &cmd_priv->mblk);
		break;

	case IEEE1394_TCODE_LOCK_RESP:
		/* read in the data length */
		quadlet = hci1394_q_ar_get32(async_handle->as_arresp_q,
		    &pkt->q4);
		data_length = HCI1394_DESC_DATALEN_GET(quadlet);

		if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_32) {
			/*
			 * Read in the data length.  Make sure the data length
			 * is valid for a lock32 response (1 quadlet).
			 */
			if (data_length != IEEE1394_QUADLET) {
				(*hcicmd)->ac_status = H1394_CMD_EDEVICE_ERROR;
				TNF_PROBE_2(hci1394_async_arresp_l32sz_fail,
				    HCI1394_TNF_HAL_STACK, "", tnf_string,
				    errmsg, "Invalid size for lock32 response",
				    tnf_uint, data_size, data_length);
				TNF_PROBE_0_DEBUG(
				    hci1394_async_arresp_read_exit,
				    HCI1394_TNF_HAL_STACK, "");
				return (DDI_SUCCESS);
			}

			/*
			 * Read the lock32 response in.  Data is treated as a
			 * byte stream unless it is an arithmetic lock
			 * operation.  In that case we treat data like a 32-bit
			 * word.
			 */
			hci1394_q_ar_rep_get8(async_handle->as_arresp_q,
			    (uint8_t *)&cmd->cmd_u.l32.old_value,
			    (uint8_t *)&pkt->q5, IEEE1394_QUADLET);
			cmd->cmd_u.l32.old_value = HCI1394_ARITH_LOCK_SWAP32(
			    cmd->cmd_u.l32.lock_type, cmd->cmd_u.l32.old_value);

		} else if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_64) {
			/*
			 * Read in the data length.  Make sure the data length
			 * is valid for a lock64 response (1 octlet).
			 */
			if (data_length != IEEE1394_OCTLET) {
				(*hcicmd)->ac_status = H1394_CMD_EDEVICE_ERROR;
				TNF_PROBE_2(hci1394_async_arresp_l64sz_fail,
				    HCI1394_TNF_HAL_STACK, "", tnf_string,
				    errmsg, "Invalid size for lock64 response",
				    tnf_uint, data_size, data_length);
				TNF_PROBE_0_DEBUG(
				    hci1394_async_arresp_read_exit,
				    HCI1394_TNF_HAL_STACK, "");
				return (DDI_SUCCESS);
			}

			/*
			 * Read the lock64 response in.  Data is treated as a
			 * byte stream unless it is an arithmetic lock
			 * operation.  In that case we treat data like a 64-bit
			 * word.
			 */
			hci1394_q_ar_rep_get8(async_handle->as_arresp_q,
			    (uint8_t *)&cmd->cmd_u.l64.old_value,
			    (uint8_t *)&pkt->q5, IEEE1394_OCTLET);
			cmd->cmd_u.l64.old_value = HCI1394_ARITH_LOCK_SWAP64(
			    cmd->cmd_u.l64.lock_type, cmd->cmd_u.l64.old_value);

		/*
		 * We sent out a request that was NOT a lock request and got
		 * back a lock response.
		 */
		} else {
			(*hcicmd)->ac_status = H1394_CMD_EDEVICE_ERROR;
			TNF_PROBE_2(hci1394_async_arresp_lockresp_fail,
			    HCI1394_TNF_HAL_STACK, "", tnf_string, errmsg,
			    "Invalid response sent for lock request", tnf_uint,
			    arresp_tcode, *tcode);
			TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
			    HCI1394_TNF_HAL_STACK, "");
			return (DDI_SUCCESS);
		}
		break;

	default:
		/* we got a tcode that we don't know about.  Return error */
		TNF_PROBE_2(hci1394_async_arresp_tcode_err,
		    HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg,
		    "unknown ARRESP received", tnf_uint, arresp_tcode, *tcode);
		TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_FAILURE);
	}

	TNF_PROBE_0_DEBUG(hci1394_async_arresp_read_exit,
	    HCI1394_TNF_HAL_STACK, "");

	return (DDI_SUCCESS);
}


/*
 * hci1394_async_arreq_read()
 *    Read an ARREQ from memory into a 1394 Framework command.  Allocate a
 *    1394 framework command, read in the ARREQ, and before passing it up to
 *    the services layer, see if it was a valid broadcast request.
 *
 *    The only time this routine should return DDI_FAILURE is if it was unable
 *    to maintain a good state in the ARREQ Q (i.e. an unknown request was
 *    received and we can not cleanup after it).  If we detect a recoverable
 *    error we should return DDI_SUCCESS with hcicmd = NULL.
 */
static int
hci1394_async_arreq_read(hci1394_async_handle_t async_handle,
    hci1394_basic_pkt_t *pkt, uint_t *tcode, hci1394_async_cmd_t **hcicmd,
    uint_t *size)
{
	h1394_cmd_priv_t *cmd_priv;
	boolean_t is_reset_token;
	cmd1394_cmd_t *cmd;
	uint32_t quadlet;
	int status;


	ASSERT(async_handle != NULL);
	ASSERT(pkt != NULL);
	ASSERT(tcode != NULL);
	ASSERT(hcicmd != NULL);
	ASSERT(size != NULL);

	TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_enter,
	    HCI1394_TNF_HAL_STACK, "");

	/* read in the arreq tcode */
	quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q1);
	*tcode = HCI1394_DESC_TCODE_GET(quadlet);

	/*
	 * Allocate a 1394 framework command.  The Services Layer takes care of
	 * caching commands.  This is called during interrupt processing so we
	 * do not want to sleep.
1573 */ 1574 status = h1394_alloc_cmd(async_handle->as_drvinfo->di_sl_private, 1575 H1394_ALLOC_CMD_NOSLEEP, &cmd, &cmd_priv); 1576 if (status != DDI_SUCCESS) { 1577 TNF_PROBE_0(hci1394_async_arreq_read_cmdalloc_fail, 1578 HCI1394_TNF_HAL_ERROR, ""); 1579 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit, 1580 HCI1394_TNF_HAL_STACK, ""); 1581 return (DDI_FAILURE); 1582 } 1583 1584 /* Initialize the HAL private command info */ 1585 hci1394_async_hcicmd_init(async_handle, cmd, cmd_priv, hcicmd); 1586 1587 /* 1588 * There are two generations in the command structure, one in the public 1589 * space and one in the HAL/SL private shared space. We need to fill in 1590 * both. We only use the private one internally. 1591 */ 1592 cmd_priv->bus_generation = async_handle->as_drvinfo->di_gencnt; 1593 cmd->bus_generation = async_handle->as_drvinfo->di_gencnt; 1594 1595 /* 1596 * Read the request (based on which kind of request it is) into the 1394 1597 * framework command. 1598 */ 1599 switch (*tcode) { 1600 case IEEE1394_TCODE_READ_QUADLET: 1601 /* 1602 * We got a ARREQ quadlet read request. Read in the packet. 1603 * If there is a problem with the packet (i.e. we don't get 1604 * DDI_SUCCESS), we will free up the command and return NULL in 1605 * hcicmd to indicate that we did not get a valid ARREQ to 1606 * process. 1607 */ 1608 status = hci1394_async_arreq_read_qrd(async_handle, pkt, 1609 *hcicmd, size); 1610 if (status != DDI_SUCCESS) { 1611 hci1394_async_response_complete(async_handle, cmd, 1612 cmd_priv); 1613 *hcicmd = NULL; 1614 TNF_PROBE_0(hci1394_async_arreq_read_qrd_fail, 1615 HCI1394_TNF_HAL_ERROR, ""); 1616 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit, 1617 HCI1394_TNF_HAL_STACK, ""); 1618 return (DDI_SUCCESS); 1619 } 1620 break; 1621 1622 case IEEE1394_TCODE_WRITE_QUADLET: 1623 /* 1624 * We got a ARREQ quadlet write request. Read in the packet. 1625 * If there is a problem with the packet (i.e. we don't get 1626 * DDI_SUCCESS), we will free up the command and return NULL in 1627 * hcicmd to indicate that we did not get a valid ARREQ to 1628 * process. 1629 */ 1630 status = hci1394_async_arreq_read_qwr(async_handle, pkt, 1631 *hcicmd, size); 1632 if (status != DDI_SUCCESS) { 1633 hci1394_async_response_complete(async_handle, cmd, 1634 cmd_priv); 1635 *hcicmd = NULL; 1636 TNF_PROBE_0(hci1394_async_arreq_read_qwr_fail, 1637 HCI1394_TNF_HAL_ERROR, ""); 1638 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit, 1639 HCI1394_TNF_HAL_STACK, ""); 1640 return (DDI_SUCCESS); 1641 } 1642 break; 1643 1644 case IEEE1394_TCODE_READ_BLOCK: 1645 /* 1646 * We got a ARREQ block read request. Read in the packet. 1647 * If there is a problem with the packet (i.e. we don't get 1648 * DDI_SUCCESS), we will free up the command and return NULL in 1649 * hcicmd to indicate that we did not get a valid ARREQ to 1650 * process. 1651 */ 1652 status = hci1394_async_arreq_read_brd(async_handle, pkt, 1653 *hcicmd, size); 1654 if (status != DDI_SUCCESS) { 1655 hci1394_async_response_complete(async_handle, cmd, 1656 cmd_priv); 1657 *hcicmd = NULL; 1658 TNF_PROBE_0(hci1394_async_arreq_read_brd_fail, 1659 HCI1394_TNF_HAL_ERROR, ""); 1660 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit, 1661 HCI1394_TNF_HAL_STACK, ""); 1662 return (DDI_SUCCESS); 1663 } 1664 break; 1665 1666 case IEEE1394_TCODE_WRITE_BLOCK: 1667 /* 1668 * We got a ARREQ block write request. Read in the packet. 1669 * If there is a problem with the packet (i.e. 
we don't get 1670 * DDI_SUCCESS), we will free up the command and return NULL in 1671 * hcicmd to indicate that we did not get a valid ARREQ to 1672 * process. 1673 */ 1674 status = hci1394_async_arreq_read_bwr(async_handle, pkt, 1675 *hcicmd, size); 1676 if (status != DDI_SUCCESS) { 1677 hci1394_async_response_complete(async_handle, cmd, 1678 cmd_priv); 1679 *hcicmd = NULL; 1680 TNF_PROBE_0(hci1394_async_arreq_read_bwr_fail, 1681 HCI1394_TNF_HAL_ERROR, ""); 1682 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit, 1683 HCI1394_TNF_HAL_STACK, ""); 1684 return (DDI_SUCCESS); 1685 } 1686 break; 1687 1688 case IEEE1394_TCODE_LOCK: 1689 /* 1690 * We got a ARREQ lock request. Read in the packet. 1691 * If there is a problem with the packet (i.e. we don't get 1692 * DDI_SUCCESS), we will free up the command and return NULL in 1693 * hcicmd to indicate that we did not get a valid ARREQ to 1694 * process. 1695 */ 1696 status = hci1394_async_arreq_read_lck(async_handle, pkt, 1697 *hcicmd, size); 1698 if (status != DDI_SUCCESS) { 1699 hci1394_async_response_complete(async_handle, cmd, 1700 cmd_priv); 1701 *hcicmd = NULL; 1702 TNF_PROBE_0(hci1394_async_arreq_read_lck_fail, 1703 HCI1394_TNF_HAL_ERROR, ""); 1704 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit, 1705 HCI1394_TNF_HAL_STACK, ""); 1706 return (DDI_SUCCESS); 1707 } 1708 break; 1709 1710 case IEEE1394_TCODE_PHY: 1711 /* 1712 * We got a PHY packet in the ARREQ buffer. Read in the packet. 1713 * If there is a problem with the packet (i.e. we don't get 1714 * DDI_SUCCESS), we will free up the command and return NULL in 1715 * hcicmd to indicate that we did not get a valid ARREQ to 1716 * process. 1717 */ 1718 status = hci1394_async_arreq_read_phy(async_handle, pkt, 1719 *hcicmd, size, &is_reset_token); 1720 if (status != DDI_SUCCESS) { 1721 hci1394_async_response_complete(async_handle, cmd, 1722 cmd_priv); 1723 *hcicmd = NULL; 1724 TNF_PROBE_0(hci1394_async_arreq_read_phy_fail, 1725 HCI1394_TNF_HAL_ERROR, ""); 1726 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit, 1727 HCI1394_TNF_HAL_STACK, ""); 1728 return (DDI_SUCCESS); 1729 } 1730 1731 /* 1732 * If we got a bus reset token, free up the command and return 1733 * NULL in hcicmd to indicate that we did not get a valid ARREQ 1734 * to process. 1735 */ 1736 if (is_reset_token == B_TRUE) { 1737 hci1394_async_response_complete(async_handle, cmd, 1738 cmd_priv); 1739 *hcicmd = NULL; 1740 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit, 1741 HCI1394_TNF_HAL_STACK, ""); 1742 return (DDI_SUCCESS); 1743 } 1744 break; 1745 1746 default: 1747 /* we got a tcode that we don't know about. Return error */ 1748 TNF_PROBE_2(hci1394_async_arreq_tcode_err, 1749 HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg, 1750 "unknown ARREQ received", tnf_uint, arreq_tcode, *tcode); 1751 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit, 1752 HCI1394_TNF_HAL_STACK, ""); 1753 return (DDI_FAILURE); 1754 } 1755 1756 /* 1757 * If this command was broadcast and it was not a write, drop the 1758 * command since it's an invalid request. We will free up the command 1759 * and return NULL in hcicmd to indicate that we did not get a valid 1760 * ARREQ to process. 
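 *
 * A broadcast ARREQ can never be answered (no ATRESP may be addressed to
 * the broadcast node ID), so only write requests are legal here. In other
 * words, the test below boils down to:
 *
 *	broadcast dest, and tcode is not WRITE_QUADLET or WRITE_BLOCK: drop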
1761 */ 1762 if ((((*hcicmd)->ac_dest & IEEE1394_NODE_NUM_MASK) == 1763 IEEE1394_BROADCAST_NODEID) && ((*tcode != 1764 IEEE1394_TCODE_WRITE_QUADLET) && (*tcode != 1765 IEEE1394_TCODE_WRITE_BLOCK))) { 1766 hci1394_async_response_complete(async_handle, cmd, cmd_priv); 1767 *hcicmd = NULL; 1768 TNF_PROBE_0(hci1394_async_arreq_read_bcast_fail, 1769 HCI1394_TNF_HAL_ERROR, ""); 1770 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit, 1771 HCI1394_TNF_HAL_STACK, ""); 1772 return (DDI_SUCCESS); 1773 1774 /* 1775 * It is a valid broadcast command, set that field in the public 1776 * command structure. 1777 */ 1778 } else if ((((*hcicmd)->ac_dest & IEEE1394_NODE_NUM_MASK) == 1779 IEEE1394_BROADCAST_NODEID)) { 1780 cmd->broadcast = 1; 1781 } 1782 1783 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit, 1784 HCI1394_TNF_HAL_STACK, ""); 1785 1786 return (DDI_SUCCESS); 1787 } 1788 1789 1790 /* 1791 * hci1394_async_arreq_read_qrd() 1792 * Read ARREQ quadlet read into the 1394 Framework command. This routine will 1793 * return DDI_FAILURE if it was not able to read the request succesfully. 1794 */ 1795 static int 1796 hci1394_async_arreq_read_qrd(hci1394_async_handle_t async_handle, 1797 hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size) 1798 { 1799 h1394_cmd_priv_t *cmd_priv; 1800 cmd1394_cmd_t *cmd; 1801 uint32_t quadlet; 1802 1803 1804 ASSERT(async_handle != NULL); 1805 ASSERT(pkt != NULL); 1806 ASSERT(hcicmd != NULL); 1807 ASSERT(size != NULL); 1808 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_qrd_enter, 1809 HCI1394_TNF_HAL_STACK, ""); 1810 1811 /* Setup shortcuts, command type, and size of request */ 1812 cmd = hcicmd->ac_cmd; 1813 cmd_priv = hcicmd->ac_priv; 1814 cmd->cmd_type = CMD1394_ASYNCH_RD_QUAD; 1815 *size = DESC_SZ_AR_READQUAD_REQ; 1816 1817 /* 1818 * read in the ARREQ ACK/EVT, the speed, the time we received it, and 1819 * calculate the ATRESP timeout for when we send it. 1820 */ 1821 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q4); 1822 hcicmd->ac_status = HCI1394_DESC_EVT_GET(quadlet); 1823 cmd_priv->speed = HCI1394_DESC_AR_SPD_GET(quadlet); 1824 cmd_priv->recv_tstamp = HCI1394_DESC_TIMESTAMP_GET(quadlet); 1825 hcicmd->ac_qcmd.qc_timestamp = hci1394_async_timeout_calc(async_handle, 1826 cmd_priv->recv_tstamp); 1827 1828 /* 1829 * if the ARREQ ACK was bad, we were unable to successfully read in this 1830 * request. Return failure. 1831 */ 1832 if ((hcicmd->ac_status != OHCI_ACK_COMPLETE) && 1833 (hcicmd->ac_status != OHCI_ACK_PENDING)) { 1834 TNF_PROBE_1(hci1394_async_arreq_qrd_ack_fail, 1835 HCI1394_TNF_HAL_ERROR, "", tnf_uint, arreq_ack, 1836 hcicmd->ac_status); 1837 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_qrd_exit, 1838 HCI1394_TNF_HAL_STACK, ""); 1839 return (DDI_FAILURE); 1840 } 1841 1842 /* 1843 * Read in the tlabel and destination. We don't use an mblk for this 1844 * request. 1845 */ 1846 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q1); 1847 hcicmd->ac_dest = HCI1394_DESC_DESTID_GET(quadlet); 1848 hcicmd->ac_tlabel.tbi_tlabel = HCI1394_DESC_TLABEL_GET(quadlet); 1849 hcicmd->ac_mblk_alloc = B_FALSE; 1850 1851 /* 1852 * Read in the sender so we know who to send the ATRESP to and read in 1853 * the 1394 48-bit address for this request. 
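 *
 * q2 carries the source node ID in its upper 16 bits and the top 16 bits
 * of the 48-bit destination offset in its lower 16 bits; q3 carries the
 * low 32 bits of the offset. For illustration (made-up values): if the
 * low 16 bits of q2 are 0xFFFF and q3 is 0xF0000400, cmd_addr becomes
 * 0xFFFFF0000400, the start of the node's configuration ROM.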
1854 */ 1855 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q2); 1856 cmd->nodeID = HCI1394_DESC_SRCID_GET(quadlet); 1857 cmd->cmd_addr = HCI1394_TO_ADDR_HI(quadlet); 1858 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q3); 1859 cmd->cmd_addr |= HCI1394_TO_ADDR_LO(quadlet); 1860 1861 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_qrd_exit, 1862 HCI1394_TNF_HAL_STACK, ""); 1863 1864 return (DDI_SUCCESS); 1865 } 1866 1867 1868 /* 1869 * hci1394_async_arreq_read_qwr() 1870 * Read ARREQ quadlet write into the 1394 Framework command. This routine 1871 * will return DDI_FAILURE if it was not able to read the request 1872 * succesfully. 1873 */ 1874 static int 1875 hci1394_async_arreq_read_qwr(hci1394_async_handle_t async_handle, 1876 hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size) 1877 { 1878 h1394_cmd_priv_t *cmd_priv; 1879 cmd1394_cmd_t *cmd; 1880 uint32_t quadlet; 1881 1882 1883 ASSERT(async_handle != NULL); 1884 ASSERT(pkt != NULL); 1885 ASSERT(hcicmd != NULL); 1886 ASSERT(size != NULL); 1887 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_qwr_enter, 1888 HCI1394_TNF_HAL_STACK, ""); 1889 1890 /* Setup shortcuts, command type, and size of request */ 1891 cmd = hcicmd->ac_cmd; 1892 cmd_priv = hcicmd->ac_priv; 1893 cmd->cmd_type = CMD1394_ASYNCH_WR_QUAD; 1894 *size = DESC_SZ_AR_WRITEQUAD_REQ; 1895 1896 /* 1897 * read in the ARREQ ACK/EVT, the speed, the time we received it, and 1898 * calculate the ATRESP timeout for when we send it. 1899 */ 1900 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q5); 1901 hcicmd->ac_status = HCI1394_DESC_EVT_GET(quadlet); 1902 cmd_priv->speed = HCI1394_DESC_AR_SPD_GET(quadlet); 1903 cmd_priv->recv_tstamp = HCI1394_DESC_TIMESTAMP_GET(quadlet); 1904 hcicmd->ac_qcmd.qc_timestamp = hci1394_async_timeout_calc(async_handle, 1905 cmd_priv->recv_tstamp); 1906 1907 /* 1908 * if the ARREQ ACK was bad, we were unable to successfully read in this 1909 * request. Return failure. 1910 */ 1911 if ((hcicmd->ac_status != OHCI_ACK_COMPLETE) && 1912 (hcicmd->ac_status != OHCI_ACK_PENDING)) { 1913 TNF_PROBE_1(hci1394_async_arreq_qwr_ack_fail, 1914 HCI1394_TNF_HAL_ERROR, "", tnf_uint, arreq_ack, 1915 hcicmd->ac_status); 1916 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_qwr_exit, 1917 HCI1394_TNF_HAL_STACK, ""); 1918 return (DDI_FAILURE); 1919 } 1920 1921 /* 1922 * Read in the tlabel and destination. We don't use an mblk for this 1923 * request. 1924 */ 1925 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q1); 1926 hcicmd->ac_dest = HCI1394_DESC_DESTID_GET(quadlet); 1927 hcicmd->ac_tlabel.tbi_tlabel = HCI1394_DESC_TLABEL_GET(quadlet); 1928 hcicmd->ac_mblk_alloc = B_FALSE; 1929 1930 /* 1931 * Read in the sender so we know who to send the ATRESP to. Read in 1932 * the 1394 48-bit address for this request. Copy the data quadlet into 1933 * the command. The data quadlet is treated like a byte stream. 
1934 */ 1935 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q2); 1936 cmd->nodeID = HCI1394_DESC_SRCID_GET(quadlet); 1937 cmd->cmd_addr = HCI1394_TO_ADDR_HI(quadlet); 1938 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q3); 1939 cmd->cmd_addr |= HCI1394_TO_ADDR_LO(quadlet); 1940 hci1394_q_ar_rep_get8(async_handle->as_arreq_q, 1941 (uint8_t *)&cmd->cmd_u.q.quadlet_data, (uint8_t *)&pkt->q4, 1942 IEEE1394_QUADLET); 1943 1944 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_qwr_exit, 1945 HCI1394_TNF_HAL_STACK, ""); 1946 1947 return (DDI_SUCCESS); 1948 } 1949 1950 1951 /* 1952 * hci1394_async_arreq_read_brd() 1953 * Read ARREQ block read into the 1394 Framework command. This routine will 1954 * return DDI_FAILURE if it was not able to read the request succesfully. 1955 */ 1956 static int 1957 hci1394_async_arreq_read_brd(hci1394_async_handle_t async_handle, 1958 hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size) 1959 { 1960 h1394_cmd_priv_t *cmd_priv; 1961 cmd1394_cmd_t *cmd; 1962 uint32_t quadlet; 1963 1964 1965 ASSERT(async_handle != NULL); 1966 ASSERT(pkt != NULL); 1967 ASSERT(hcicmd != NULL); 1968 ASSERT(size != NULL); 1969 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_brd_enter, 1970 HCI1394_TNF_HAL_STACK, ""); 1971 1972 /* Setup shortcuts, command type, and size of request */ 1973 cmd = hcicmd->ac_cmd; 1974 cmd_priv = hcicmd->ac_priv; 1975 cmd->cmd_type = CMD1394_ASYNCH_RD_BLOCK; 1976 *size = DESC_SZ_AR_READBLOCK_REQ; 1977 1978 /* 1979 * read in the ARREQ ACK/EVT, the speed, the time we received it, and 1980 * calculate the ATRESP timeout for when we send it. 1981 */ 1982 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q5); 1983 hcicmd->ac_status = HCI1394_DESC_EVT_GET(quadlet); 1984 cmd_priv->speed = HCI1394_DESC_AR_SPD_GET(quadlet); 1985 cmd_priv->recv_tstamp = HCI1394_DESC_TIMESTAMP_GET(quadlet); 1986 hcicmd->ac_qcmd.qc_timestamp = hci1394_async_timeout_calc(async_handle, 1987 cmd_priv->recv_tstamp); 1988 1989 /* 1990 * if the ARREQ ACK was bad, we were unable to successfully read in this 1991 * request. Return failure. 1992 */ 1993 if ((hcicmd->ac_status != OHCI_ACK_COMPLETE) && 1994 (hcicmd->ac_status != OHCI_ACK_PENDING)) { 1995 TNF_PROBE_1(hci1394_async_arreq_brd_ack_fail, 1996 HCI1394_TNF_HAL_ERROR, "", tnf_uint, arreq_ack, 1997 hcicmd->ac_status); 1998 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_brd_exit, 1999 HCI1394_TNF_HAL_STACK, ""); 2000 return (DDI_FAILURE); 2001 } 2002 2003 /* Read in the tlabel and destination */ 2004 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q1); 2005 hcicmd->ac_dest = HCI1394_DESC_DESTID_GET(quadlet); 2006 hcicmd->ac_tlabel.tbi_tlabel = HCI1394_DESC_TLABEL_GET(quadlet); 2007 2008 /* 2009 * Read in the sender so we know who to send the ATRESP to. Read in 2010 * the 1394 48-bit address for this request. Read in the block data size 2011 * and allocate an mblk of that size. 
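 *
 * blk_length comes from the datalen field of q4 and is in bytes; the mblk
 * allocated below is sized to hold that many bytes. The same length is
 * later used to set the datalen of the block read ATRESP in
 * hci1394_async_read_response().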
2012 */ 2013 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q2); 2014 cmd->nodeID = HCI1394_DESC_SRCID_GET(quadlet); 2015 cmd->cmd_addr = HCI1394_TO_ADDR_HI(quadlet); 2016 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q3); 2017 cmd->cmd_addr |= HCI1394_TO_ADDR_LO(quadlet); 2018 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q4); 2019 cmd->cmd_u.b.blk_length = HCI1394_DESC_DATALEN_GET(quadlet); 2020 cmd->cmd_u.b.data_block = allocb(cmd->cmd_u.b.blk_length, 0); 2021 if (cmd->cmd_u.b.data_block == NULL) { 2022 TNF_PROBE_0(hci1394_async_arreq_brd_mblk_fail, 2023 HCI1394_TNF_HAL_ERROR, ""); 2024 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_brd_exit, 2025 HCI1394_TNF_HAL_STACK, ""); 2026 return (DDI_FAILURE); 2027 } 2028 hcicmd->ac_mblk_alloc = B_TRUE; 2029 2030 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_brd_exit, 2031 HCI1394_TNF_HAL_STACK, ""); 2032 2033 return (DDI_SUCCESS); 2034 } 2035 2036 2037 /* 2038 * hci1394_async_arreq_read_bwr() 2039 * Read ARREQ block write into the 1394 Framework command. This routine will 2040 * return DDI_FAILURE if it was not able to read the request succesfully. 2041 */ 2042 static int 2043 hci1394_async_arreq_read_bwr(hci1394_async_handle_t async_handle, 2044 hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size) 2045 { 2046 h1394_cmd_priv_t *cmd_priv; 2047 uint32_t *local_addr; 2048 cmd1394_cmd_t *cmd; 2049 uint32_t quadlet; 2050 2051 2052 ASSERT(async_handle != NULL); 2053 ASSERT(pkt != NULL); 2054 ASSERT(hcicmd != NULL); 2055 ASSERT(size != NULL); 2056 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_bwr_enter, 2057 HCI1394_TNF_HAL_STACK, ""); 2058 2059 /* 2060 * Setup shortcuts, command type, and size of request. The size of the 2061 * request is in quadlets, therefore we need to make sure we count in 2062 * the padding when figureing out the size (i.e. data may be in bytes 2063 * but the HW always pads to quadlets) 2064 */ 2065 cmd = hcicmd->ac_cmd; 2066 cmd_priv = hcicmd->ac_priv; 2067 cmd->cmd_type = CMD1394_ASYNCH_WR_BLOCK; 2068 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q4); 2069 cmd->cmd_u.b.blk_length = HCI1394_DESC_DATALEN_GET(quadlet); 2070 *size = DESC_SZ_AR_WRITEBLOCK_REQ + 2071 HCI1394_ALIGN_QUAD(cmd->cmd_u.b.blk_length); 2072 2073 /* 2074 * read in the ARREQ ACK/EVT, the speed, the time we received it, and 2075 * calculate the ATRESP timeout for when we send it. The status word is 2076 * the last quadlet in the packet. 2077 */ 2078 local_addr = (uint32_t *)(((uintptr_t)(&pkt->q5)) + 2079 ((uintptr_t)HCI1394_ALIGN_QUAD(cmd->cmd_u.b.blk_length))); 2080 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, local_addr); 2081 hcicmd->ac_status = HCI1394_DESC_EVT_GET(quadlet); 2082 cmd_priv->speed = HCI1394_DESC_AR_SPD_GET(quadlet); 2083 cmd_priv->recv_tstamp = HCI1394_DESC_TIMESTAMP_GET(quadlet); 2084 hcicmd->ac_qcmd.qc_timestamp = hci1394_async_timeout_calc(async_handle, 2085 cmd_priv->recv_tstamp); 2086 2087 /* 2088 * if the ARREQ ACK was bad, we were unable to successfully read in this 2089 * request. Return failure. 
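 *
 * "Good" here means OHCI_ACK_COMPLETE (the write completed at the link
 * layer, no response expected) or OHCI_ACK_PENDING (we ack'd pending, so
 * an ATRESP is owed). Any other value in the status word is an ack/evt
 * error and the packet is not worth parsing.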
2090 */ 2091 if ((hcicmd->ac_status != OHCI_ACK_COMPLETE) && 2092 (hcicmd->ac_status != OHCI_ACK_PENDING)) { 2093 TNF_PROBE_1(hci1394_async_arreq_bwr_ack_fail, 2094 HCI1394_TNF_HAL_ERROR, "", tnf_uint, arreq_ack, 2095 hcicmd->ac_status); 2096 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_bwr_exit, 2097 HCI1394_TNF_HAL_STACK, ""); 2098 return (DDI_FAILURE); 2099 } 2100 2101 /* Read in the tlabel and destination */ 2102 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q1); 2103 hcicmd->ac_dest = HCI1394_DESC_DESTID_GET(quadlet); 2104 hcicmd->ac_tlabel.tbi_tlabel = HCI1394_DESC_TLABEL_GET(quadlet); 2105 2106 /* 2107 * Read in the sender so we know who to send the ATRESP to. Read in 2108 * the 1394 48-bit address for this request. Read in the block data size 2109 * and allocate an mblk of that size. 2110 */ 2111 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q2); 2112 cmd->nodeID = HCI1394_DESC_SRCID_GET(quadlet); 2113 cmd->cmd_addr = HCI1394_TO_ADDR_HI(quadlet); 2114 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q3); 2115 cmd->cmd_addr |= HCI1394_TO_ADDR_LO(quadlet); 2116 cmd->cmd_u.b.data_block = allocb(cmd->cmd_u.b.blk_length, 0); 2117 if (cmd->cmd_u.b.data_block == NULL) { 2118 TNF_PROBE_0(hci1394_async_arreq_bwr_mblk_fail, 2119 HCI1394_TNF_HAL_ERROR, ""); 2120 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_bwr_exit, 2121 HCI1394_TNF_HAL_STACK, ""); 2122 return (DDI_FAILURE); 2123 } 2124 hcicmd->ac_mblk_alloc = B_TRUE; 2125 2126 /* Copy ARREQ write data into mblk_t */ 2127 hci1394_q_ar_rep_get8(async_handle->as_arreq_q, 2128 (uint8_t *)cmd->cmd_u.b.data_block->b_wptr, 2129 (uint8_t *)&pkt->q5, cmd->cmd_u.b.blk_length); 2130 2131 /* Update mblk_t wptr */ 2132 cmd->cmd_u.b.data_block->b_wptr += cmd->cmd_u.b.blk_length; 2133 2134 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_bwr_exit, 2135 HCI1394_TNF_HAL_STACK, ""); 2136 2137 return (DDI_SUCCESS); 2138 } 2139 2140 2141 /* 2142 * hci1394_async_arreq_read_lck() 2143 * Read ARREQ lock request into the 1394 Framework command. This routine will 2144 * return DDI_FAILURE if it was not able to read the request succesfully. 2145 */ 2146 static int 2147 hci1394_async_arreq_read_lck(hci1394_async_handle_t async_handle, 2148 hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size) 2149 { 2150 h1394_cmd_priv_t *cmd_priv; 2151 uint32_t *local_addr; 2152 cmd1394_cmd_t *cmd; 2153 uint8_t *data_addr; 2154 uint32_t quadlet; 2155 uint32_t length; 2156 2157 2158 ASSERT(async_handle != NULL); 2159 ASSERT(pkt != NULL); 2160 ASSERT(hcicmd != NULL); 2161 ASSERT(size != NULL); 2162 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_qrd_enter, 2163 HCI1394_TNF_HAL_STACK, ""); 2164 2165 /* 2166 * Setup shortcuts, command type, and size of request. The size of the 2167 * request is in quadlets, therefore we need to make sure we count in 2168 * the padding when figuring out the size (i.e. 
data may be in bytes 2169 * but the HW always pads to quadlets) 2170 */ 2171 cmd = hcicmd->ac_cmd; 2172 cmd_priv = hcicmd->ac_priv; 2173 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q4); 2174 length = HCI1394_DESC_DATALEN_GET(quadlet); 2175 *size = DESC_SZ_AR_LOCK_REQ + HCI1394_ALIGN_QUAD(length); 2176 2177 /* make sure the length is a valid lock request length */ 2178 if (length == DESC_TWO_QUADS) { 2179 cmd->cmd_type = CMD1394_ASYNCH_LOCK_32; 2180 cmd->cmd_u.l32.lock_type = HCI1394_DESC_EXTTCODE_GET(quadlet); 2181 } else if (length == DESC_TWO_OCTLETS) { 2182 cmd->cmd_type = CMD1394_ASYNCH_LOCK_64; 2183 cmd->cmd_u.l64.lock_type = HCI1394_DESC_EXTTCODE_GET(quadlet); 2184 } else { 2185 TNF_PROBE_2(hci1394_async_arreq_lck_sz_fail, 2186 HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg, 2187 "unexpected length received", tnf_uint, locklen, length); 2188 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit, 2189 HCI1394_TNF_HAL_STACK, ""); 2190 return (DDI_FAILURE); 2191 } 2192 2193 /* 2194 * read in the ARREQ ACK/EVT, the speed, the time we received it, and 2195 * calculate the ATRESP timeout for when we send it. The status word is 2196 * the last quadlet in the packet. 2197 */ 2198 local_addr = (uint32_t *)(((uintptr_t)(&pkt->q5)) + 2199 ((uintptr_t)HCI1394_ALIGN_QUAD(length))); 2200 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, local_addr); 2201 hcicmd->ac_status = HCI1394_DESC_EVT_GET(quadlet); 2202 cmd_priv->speed = HCI1394_DESC_AR_SPD_GET(quadlet); 2203 cmd_priv->recv_tstamp = HCI1394_DESC_TIMESTAMP_GET(quadlet); 2204 hcicmd->ac_qcmd.qc_timestamp = hci1394_async_timeout_calc(async_handle, 2205 cmd_priv->recv_tstamp); 2206 2207 /* 2208 * if the ARREQ ACK was bad, we were unable to successfully read in this 2209 * request. Return failure. 2210 */ 2211 if ((hcicmd->ac_status != OHCI_ACK_COMPLETE) && 2212 (hcicmd->ac_status != OHCI_ACK_PENDING)) { 2213 TNF_PROBE_1(hci1394_async_arreq_read_ack_fail, 2214 HCI1394_TNF_HAL_ERROR, "", tnf_uint, arreq_ack, 2215 hcicmd->ac_status); 2216 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_exit, 2217 HCI1394_TNF_HAL_STACK, ""); 2218 return (DDI_FAILURE); 2219 } 2220 2221 /* Read in the tlabel and destination */ 2222 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q1); 2223 hcicmd->ac_dest = HCI1394_DESC_DESTID_GET(quadlet); 2224 hcicmd->ac_tlabel.tbi_tlabel = HCI1394_DESC_TLABEL_GET(quadlet); 2225 hcicmd->ac_mblk_alloc = B_FALSE; 2226 2227 /* 2228 * Read in the sender so we know who to send the ATRESP to. Read in 2229 * the 1394 48-bit address for this request. 
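 *
 * The lock payload that follows the header holds arg_value and then
 * data_value back to back, each one quadlet for a lock32 (DESC_TWO_QUADS
 * total) or one octlet for a lock64 (DESC_TWO_OCTLETS total). That layout
 * is what the copy code below walks, starting at pkt->q5.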
2230 */ 2231 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q2); 2232 cmd->nodeID = HCI1394_DESC_SRCID_GET(quadlet); 2233 cmd->cmd_addr = HCI1394_TO_ADDR_HI(quadlet); 2234 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q3); 2235 cmd->cmd_addr |= HCI1394_TO_ADDR_LO(quadlet); 2236 2237 /* Copy ARREQ lock data into 1394 framework command */ 2238 if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_32) { 2239 data_addr = (uint8_t *)&pkt->q5; 2240 hci1394_q_ar_rep_get8(async_handle->as_arreq_q, 2241 (uint8_t *)&cmd->cmd_u.l32.arg_value, data_addr, 2242 IEEE1394_QUADLET); 2243 data_addr = (uint8_t *)((uintptr_t)data_addr + 2244 (uintptr_t)IEEE1394_QUADLET); 2245 hci1394_q_ar_rep_get8(async_handle->as_arreq_q, 2246 (uint8_t *)&cmd->cmd_u.l32.data_value, data_addr, 2247 IEEE1394_QUADLET); 2248 /* 2249 * swap these for our correct architecture if we are doing 2250 * arithmetic lock operations 2251 */ 2252 cmd->cmd_u.l32.arg_value = HCI1394_ARITH_LOCK_SWAP32( 2253 cmd->cmd_u.l32.lock_type, cmd->cmd_u.l32.arg_value); 2254 cmd->cmd_u.l32.data_value = HCI1394_ARITH_LOCK_SWAP32( 2255 cmd->cmd_u.l32.lock_type, cmd->cmd_u.l32.data_value); 2256 } else if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_64) { 2257 data_addr = (uint8_t *)&pkt->q5; 2258 hci1394_q_ar_rep_get8(async_handle->as_arreq_q, 2259 (uint8_t *)&cmd->cmd_u.l64.arg_value, data_addr, 2260 IEEE1394_OCTLET); 2261 data_addr = (uint8_t *)((uintptr_t)data_addr + 2262 (uintptr_t)IEEE1394_OCTLET); 2263 hci1394_q_ar_rep_get8(async_handle->as_arreq_q, 2264 (uint8_t *)&cmd->cmd_u.l64.data_value, data_addr, 2265 IEEE1394_OCTLET); 2266 2267 /* 2268 * swap these for our correct architecture if we are doing 2269 * arithmetic lock operations 2270 */ 2271 cmd->cmd_u.l64.arg_value = HCI1394_ARITH_LOCK_SWAP64( 2272 cmd->cmd_u.l64.lock_type, cmd->cmd_u.l64.arg_value); 2273 cmd->cmd_u.l64.data_value = HCI1394_ARITH_LOCK_SWAP64( 2274 cmd->cmd_u.l64.lock_type, cmd->cmd_u.l64.data_value); 2275 } 2276 2277 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_qrd_exit, 2278 HCI1394_TNF_HAL_STACK, ""); 2279 2280 return (DDI_SUCCESS); 2281 } 2282 2283 2284 /* 2285 * hci1394_async_arreq_read_phy() 2286 * Read ARREQ PHY quadlet into the 1394 Framework command. This routine will 2287 * return DDI_FAILURE if it was not able to read the request succesfully. 2288 */ 2289 static int 2290 hci1394_async_arreq_read_phy(hci1394_async_handle_t async_handle, 2291 hci1394_basic_pkt_t *pkt, hci1394_async_cmd_t *hcicmd, uint_t *size, 2292 boolean_t *bus_reset_token) 2293 { 2294 cmd1394_cmd_t *cmd; 2295 uint32_t quadlet; 2296 uint32_t data1; 2297 uint32_t data2; 2298 2299 2300 ASSERT(async_handle != NULL); 2301 ASSERT(pkt != NULL); 2302 ASSERT(hcicmd != NULL); 2303 ASSERT(size != NULL); 2304 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_phy_enter, 2305 HCI1394_TNF_HAL_STACK, ""); 2306 2307 /* Setup shortcuts, command type, and size of request */ 2308 cmd = hcicmd->ac_cmd; 2309 cmd->cmd_type = CMD1394_ASYNCH_WR_QUAD; 2310 *size = DESC_SZ_AR_PHY; 2311 2312 /* 2313 * read in the ARREQ ACK/EVT, the speed, the time we received it, and 2314 * set state that we do not use an mblk for this request. 
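 *
 * The two quadlets read next (q2 and q3) hold the PHY packet and its
 * check word. A 1394 PHY packet is sent as a data quadlet followed by its
 * bitwise inverse, which is what the (data1 != ~data2) test further down
 * verifies.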
2315 */ 2316 quadlet = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q4); 2317 hcicmd->ac_status = HCI1394_DESC_EVT_GET(quadlet); 2318 hcicmd->ac_priv->speed = HCI1394_DESC_AR_SPD_GET(quadlet); 2319 hcicmd->ac_priv->recv_tstamp = HCI1394_DESC_TIMESTAMP_GET(quadlet); 2320 hcicmd->ac_mblk_alloc = B_FALSE; 2321 2322 /* Read in the PHY packet quadlet and its check quadlet */ 2323 data1 = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q2); 2324 data2 = hci1394_q_ar_get32(async_handle->as_arreq_q, &pkt->q3); 2325 2326 /* 2327 * if this is a bus reset token, save away the generation. If the bus 2328 * reset token is for the current generation, we do not need to flush 2329 * the ARREQ Q anymore. 2330 */ 2331 if (hcicmd->ac_status == OHCI_EVT_BUS_RESET) { 2332 *bus_reset_token = B_TRUE; 2333 async_handle->as_phy_reset = HCI1394_DESC_PHYGEN_GET(data2); 2334 if (async_handle->as_phy_reset == hci1394_ohci_current_busgen( 2335 async_handle->as_ohci)) { 2336 async_handle->as_flushing_arreq = B_FALSE; 2337 } 2338 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_phy_exit, 2339 HCI1394_TNF_HAL_STACK, ""); 2340 return (DDI_SUCCESS); 2341 } 2342 2343 *bus_reset_token = B_FALSE; 2344 2345 /* if there is a data error in the PHY packet, return failure */ 2346 if (data1 != ~data2) { 2347 TNF_PROBE_2(hci1394_async_arreq_phy_xor_fail, 2348 HCI1394_TNF_HAL_ERROR, "", tnf_opaque, first_quadlet, 2349 data1, tnf_opaque, second_quadlet, data2); 2350 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_phy_exit, 2351 HCI1394_TNF_HAL_STACK, ""); 2352 return (DDI_FAILURE); 2353 } 2354 2355 /* Copy the PHY quadlet to the command */ 2356 cmd->cmd_u.q.quadlet_data = data1; 2357 2358 TNF_PROBE_0_DEBUG(hci1394_async_arreq_read_phy_exit, 2359 HCI1394_TNF_HAL_STACK, ""); 2360 2361 return (DDI_SUCCESS); 2362 } 2363 2364 2365 /* 2366 * hci1394_async_phy() 2367 * Queue up ATREQ phy packet. 2368 */ 2369 int 2370 hci1394_async_phy(hci1394_async_handle_t async_handle, cmd1394_cmd_t *cmd, 2371 h1394_cmd_priv_t *cmd_priv, int *result) 2372 { 2373 hci1394_basic_pkt_t header; 2374 hci1394_async_cmd_t *hcicmd; 2375 int status; 2376 2377 2378 ASSERT(async_handle != NULL); 2379 ASSERT(cmd != NULL); 2380 ASSERT(cmd_priv != NULL); 2381 ASSERT(result != NULL); 2382 2383 TNF_PROBE_0_DEBUG(hci1394_async_phy_enter, HCI1394_TNF_HAL_STACK, ""); 2384 2385 /* 2386 * make sure this call is during the current bus generation (i.e. no 2387 * bus resets have occured since this request was made. 2388 */ 2389 if (cmd_priv->bus_generation != hci1394_ohci_current_busgen( 2390 async_handle->as_ohci)) { 2391 *result = H1394_STATUS_INVALID_BUSGEN; 2392 TNF_PROBE_0_DEBUG(hci1394_async_phy_exit, 2393 HCI1394_TNF_HAL_STACK, ""); 2394 return (DDI_FAILURE); 2395 } 2396 2397 /* Initialize the private HAL command structure */ 2398 hci1394_async_hcicmd_init(async_handle, cmd, cmd_priv, &hcicmd); 2399 2400 /* We do not allocate a tlabel for a PHY packet */ 2401 hcicmd->ac_tlabel_alloc = B_FALSE; 2402 2403 /* 2404 * Setup the packet header information for a ATREQ PHY packet Add in 2405 * the tcode, phy quadlet, and it's 1's complement. 2406 */ 2407 header.q1 = DESC_ATREQ_Q1_PHY; 2408 header.q2 = cmd->cmd_u.q.quadlet_data; 2409 header.q3 = ~header.q2; 2410 2411 /* Write request into the ATREQ Q. 
If we fail, we're out of space */ 2412 status = hci1394_q_at(async_handle->as_atreq_q, &hcicmd->ac_qcmd, 2413 &header, DESC_PKT_HDRLEN_AT_PHY, result); 2414 if (status != DDI_SUCCESS) { 2415 TNF_PROBE_0(hci1394_async_phy_q_fail, HCI1394_TNF_HAL_ERROR, 2416 ""); 2417 TNF_PROBE_0_DEBUG(hci1394_async_phy_exit, 2418 HCI1394_TNF_HAL_STACK, ""); 2419 return (DDI_FAILURE); 2420 } 2421 2422 TNF_PROBE_0_DEBUG(hci1394_async_phy_exit, HCI1394_TNF_HAL_STACK, ""); 2423 2424 return (DDI_SUCCESS); 2425 } 2426 2427 2428 /* 2429 * hci1394_async_write() 2430 * Queue up ATREQ write. This could be either a block write or a quadlet 2431 * write. 2432 */ 2433 int 2434 hci1394_async_write(hci1394_async_handle_t async_handle, cmd1394_cmd_t *cmd, 2435 h1394_cmd_priv_t *cmd_priv, int *result) 2436 { 2437 hci1394_async_cmd_t *hcicmd; 2438 hci1394_basic_pkt_t header; 2439 int status; 2440 2441 2442 ASSERT(async_handle != NULL); 2443 ASSERT(cmd != NULL); 2444 ASSERT(cmd_priv != NULL); 2445 ASSERT(result != NULL); 2446 2447 TNF_PROBE_0_DEBUG(hci1394_async_write_enter, HCI1394_TNF_HAL_STACK, ""); 2448 2449 /* 2450 * make sure this call is during the current bus generation (i.e. no 2451 * bus resets have occured since this request was made. 2452 */ 2453 if (cmd_priv->bus_generation != hci1394_ohci_current_busgen( 2454 async_handle->as_ohci)) { 2455 *result = H1394_STATUS_INVALID_BUSGEN; 2456 TNF_PROBE_0_DEBUG(hci1394_async_write_exit, 2457 HCI1394_TNF_HAL_STACK, ""); 2458 return (DDI_FAILURE); 2459 } 2460 2461 /* Initialize the private HAL command structure */ 2462 hci1394_async_hcicmd_init(async_handle, cmd, cmd_priv, &hcicmd); 2463 hcicmd->ac_dest = (uint_t)(cmd->cmd_addr >> IEEE1394_ADDR_PHY_ID_SHIFT); 2464 2465 /* allocate a tlabel for this request */ 2466 status = hci1394_tlabel_alloc(async_handle->as_tlabel, hcicmd->ac_dest, 2467 &hcicmd->ac_tlabel); 2468 if (status != DDI_SUCCESS) { 2469 *result = H1394_STATUS_EMPTY_TLABEL; 2470 TNF_PROBE_0(hci1394_async_write_tlb_fail, 2471 HCI1394_TNF_HAL_ERROR, ""); 2472 TNF_PROBE_0_DEBUG(hci1394_async_write_exit, 2473 HCI1394_TNF_HAL_STACK, ""); 2474 return (DDI_FAILURE); 2475 } 2476 2477 /* 2478 * Setup the packet header information for a ATREQ write packet. We 2479 * will set the tcode later on since this could be a block write or 2480 * a quadlet write. Set SRCBusId if this write is not a local bus 2481 * access. Copy in the speed, tlabel, and destination address. 2482 */ 2483 header.q1 = 0; 2484 if ((hcicmd->ac_dest & IEEE1394_BUS_NUM_MASK) != 2485 IEEE1394_BUS_NUM_MASK) { 2486 header.q1 |= DESC_AT_SRCBUSID; 2487 } 2488 header.q1 |= HCI1394_DESC_AT_SPD_SET(cmd_priv->speed) | 2489 HCI1394_DESC_TLABEL_SET(hcicmd->ac_tlabel.tbi_tlabel); 2490 header.q2 = (uint32_t)(cmd->cmd_addr >> 32); 2491 header.q3 = (uint32_t)(cmd->cmd_addr & DESC_PKT_DESTOFFLO_MASK); 2492 2493 /* Register this command w/ its tlabel */ 2494 hci1394_tlabel_register(async_handle->as_tlabel, &hcicmd->ac_tlabel, 2495 hcicmd); 2496 2497 /* If this is a quadlet write ATREQ */ 2498 if (cmd->cmd_type == CMD1394_ASYNCH_WR_QUAD) { 2499 /* 2500 * setup the tcode for a quadlet write request and copy in 2501 * the quadlet data. Endian issues will be taken care of in 2502 * hci1394_q_at(). 2503 */ 2504 header.q1 |= DESC_ATREQ_Q1_QWR; 2505 header.q4 = cmd->cmd_u.q.quadlet_data; 2506 2507 /* 2508 * Write the request into the ATREQ Q. If we fail, we are out 2509 * of space. 
2510 */ 2511 status = hci1394_q_at(async_handle->as_atreq_q, 2512 &hcicmd->ac_qcmd, &header, DESC_PKT_HDRLEN_AT_WRITEQUAD, 2513 result); 2514 if (status != DDI_SUCCESS) { 2515 TNF_PROBE_0(hci1394_async_write_q_fail, 2516 HCI1394_TNF_HAL_ERROR, ""); 2517 TNF_PROBE_0_DEBUG(hci1394_async_write_exit, 2518 HCI1394_TNF_HAL_STACK, ""); 2519 return (DDI_FAILURE); 2520 } 2521 2522 /* This is a block write ATREQ */ 2523 } else { 2524 /* setup the tcode and the length of the block write */ 2525 header.q1 |= DESC_ATREQ_Q1_BWR; 2526 header.q4 = HCI1394_DESC_DATALEN_SET(cmd_priv->mblk.length); 2527 2528 /* 2529 * Write the request into the ATREQ Q. If we fail, we are out 2530 * of space. The data is in a mblk(s). We use a special 2531 * interface in the HAL/SL private command block to handle 2532 * partial transfers out of the mblk due to packet size 2533 * restrictions. 2534 */ 2535 status = hci1394_q_at_with_mblk(async_handle->as_atreq_q, 2536 &hcicmd->ac_qcmd, &header, DESC_PKT_HDRLEN_AT_WRITEBLOCK, 2537 &cmd_priv->mblk, result); 2538 if (status != DDI_SUCCESS) { 2539 TNF_PROBE_0(hci1394_async_write_qmblk_fail, 2540 HCI1394_TNF_HAL_ERROR, ""); 2541 TNF_PROBE_0_DEBUG(hci1394_async_write_exit, 2542 HCI1394_TNF_HAL_STACK, ""); 2543 return (DDI_FAILURE); 2544 } 2545 } 2546 2547 TNF_PROBE_0_DEBUG(hci1394_async_write_exit, HCI1394_TNF_HAL_STACK, ""); 2548 2549 return (DDI_SUCCESS); 2550 } 2551 2552 2553 /* 2554 * hci1394_async_read() 2555 * Queue up ATREQ read. This could be either a block read or a quadlet 2556 * read. 2557 */ 2558 int 2559 hci1394_async_read(hci1394_async_handle_t async_handle, cmd1394_cmd_t *cmd, 2560 h1394_cmd_priv_t *cmd_priv, int *result) 2561 { 2562 hci1394_basic_pkt_t header; 2563 int status; 2564 hci1394_async_cmd_t *hcicmd; 2565 2566 2567 ASSERT(async_handle != NULL); 2568 ASSERT(cmd != NULL); 2569 ASSERT(cmd_priv != NULL); 2570 ASSERT(result != NULL); 2571 2572 TNF_PROBE_0_DEBUG(hci1394_async_read_enter, HCI1394_TNF_HAL_STACK, ""); 2573 2574 /* 2575 * make sure this call is during the current bus generation (i.e. no 2576 * bus resets have occured since this request was made. 2577 */ 2578 if (cmd_priv->bus_generation != hci1394_ohci_current_busgen( 2579 async_handle->as_ohci)) { 2580 *result = H1394_STATUS_INVALID_BUSGEN; 2581 TNF_PROBE_0_DEBUG(hci1394_async_read_exit, 2582 HCI1394_TNF_HAL_STACK, ""); 2583 return (DDI_FAILURE); 2584 } 2585 2586 /* Initialize the private HAL command structure */ 2587 hci1394_async_hcicmd_init(async_handle, cmd, cmd_priv, &hcicmd); 2588 hcicmd->ac_dest = (uint_t)(cmd->cmd_addr >> IEEE1394_ADDR_PHY_ID_SHIFT); 2589 2590 /* allocate a tlabel for this request */ 2591 status = hci1394_tlabel_alloc(async_handle->as_tlabel, hcicmd->ac_dest, 2592 &hcicmd->ac_tlabel); 2593 if (status != DDI_SUCCESS) { 2594 *result = H1394_STATUS_EMPTY_TLABEL; 2595 TNF_PROBE_0(hci1394_async_read_tlb_fail, 2596 HCI1394_TNF_HAL_ERROR, ""); 2597 TNF_PROBE_0_DEBUG(hci1394_async_read_exit, 2598 HCI1394_TNF_HAL_STACK, ""); 2599 return (DDI_FAILURE); 2600 } 2601 2602 /* 2603 * Setup the packet header information for a ATREQ read packet. We 2604 * will set the tcode later on since this could be a block read or 2605 * a quadlet read. Set SRCBusId if this read is not a local bus 2606 * access. Copy in the speed, tlabel, and destination address. 
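 *
 * Roughly, the ATREQ header built below looks like:
 *
 *	q1: srcBusID, speed, tlabel and (later) the tcode
 *	q2: destination node ID plus bits 47..32 of the offset
 *	    (i.e. the top 32 bits of cmd_addr)
 *	q3: bits 31..0 of the offset
 *	q4: datalen for a block read, 0 for a quadlet read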
2607 */ 2608 header.q1 = 0; 2609 if ((hcicmd->ac_dest & IEEE1394_BUS_NUM_MASK) != 2610 IEEE1394_BUS_NUM_MASK) { 2611 header.q1 |= DESC_AT_SRCBUSID; 2612 } 2613 header.q1 |= HCI1394_DESC_AT_SPD_SET(cmd_priv->speed) | 2614 HCI1394_DESC_TLABEL_SET(hcicmd->ac_tlabel.tbi_tlabel); 2615 header.q2 = (uint32_t)(cmd->cmd_addr >> 32); 2616 header.q3 = (uint32_t)(cmd->cmd_addr & DESC_PKT_DESTOFFLO_MASK); 2617 2618 /* Register this command w/ its tlabel */ 2619 hci1394_tlabel_register(async_handle->as_tlabel, &hcicmd->ac_tlabel, 2620 hcicmd); 2621 2622 /* If this is a quadlet read ATREQ */ 2623 if (cmd->cmd_type == CMD1394_ASYNCH_RD_QUAD) { 2624 /* setup the tcode for a quadlet read request */ 2625 header.q1 |= DESC_ATREQ_Q1_QRD; 2626 header.q4 = 0; 2627 2628 /* 2629 * Write the request into the ATREQ Q. If we fail, we are out 2630 * of space. 2631 */ 2632 status = hci1394_q_at(async_handle->as_atreq_q, 2633 &hcicmd->ac_qcmd, &header, DESC_PKT_HDRLEN_AT_READQUAD, 2634 result); 2635 if (status != DDI_SUCCESS) { 2636 TNF_PROBE_0(hci1394_async_read_q_fail, 2637 HCI1394_TNF_HAL_ERROR, ""); 2638 TNF_PROBE_0_DEBUG(hci1394_async_read_exit, 2639 HCI1394_TNF_HAL_STACK, ""); 2640 return (DDI_FAILURE); 2641 } 2642 2643 } else { 2644 /* setup the tcode and the length of the block read */ 2645 header.q1 |= DESC_ATREQ_Q1_BRD; 2646 header.q4 = HCI1394_DESC_DATALEN_SET(cmd_priv->mblk.length); 2647 2648 /* 2649 * Write the request into the ATREQ Q. If we fail, we are out 2650 * of space. 2651 */ 2652 status = hci1394_q_at(async_handle->as_atreq_q, 2653 &hcicmd->ac_qcmd, &header, DESC_PKT_HDRLEN_AT_READBLOCK, 2654 result); 2655 if (status != DDI_SUCCESS) { 2656 TNF_PROBE_0(hci1394_async_read_qb_fail, 2657 HCI1394_TNF_HAL_ERROR, ""); 2658 TNF_PROBE_0_DEBUG(hci1394_async_read_exit, 2659 HCI1394_TNF_HAL_STACK, ""); 2660 return (DDI_FAILURE); 2661 } 2662 } 2663 2664 TNF_PROBE_0_DEBUG(hci1394_async_read_exit, HCI1394_TNF_HAL_STACK, ""); 2665 2666 return (DDI_SUCCESS); 2667 } 2668 2669 2670 /* 2671 * hci1394_async_lock() 2672 * Queue up ATREQ lock. This could be either a 32-bit or 64-bit lock 2673 * request. 2674 */ 2675 int 2676 hci1394_async_lock(hci1394_async_handle_t async_handle, cmd1394_cmd_t *cmd, 2677 h1394_cmd_priv_t *cmd_priv, int *result) 2678 { 2679 hci1394_basic_pkt_t header; 2680 hci1394_async_cmd_t *hcicmd; 2681 uint32_t data32[2]; 2682 uint64_t data64[2]; 2683 uint8_t *datap; 2684 uint_t size; 2685 int status; 2686 2687 2688 ASSERT(async_handle != NULL); 2689 ASSERT(cmd != NULL); 2690 ASSERT(cmd_priv != NULL); 2691 ASSERT(result != NULL); 2692 2693 TNF_PROBE_0_DEBUG(hci1394_async_lock_enter, HCI1394_TNF_HAL_STACK, ""); 2694 2695 /* 2696 * make sure this call is during the current bus generation (i.e. no 2697 * bus resets have occured since this request was made. 
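 *
 * If a bus reset has happened since the target driver built this command,
 * the node it was aimed at may have a different node ID now, so the lock
 * is failed back with H1394_STATUS_INVALID_BUSGEN rather than put on the
 * wire.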
2698 */ 2699 if (cmd_priv->bus_generation != hci1394_ohci_current_busgen( 2700 async_handle->as_ohci)) { 2701 *result = H1394_STATUS_INVALID_BUSGEN; 2702 TNF_PROBE_0_DEBUG(hci1394_async_lock_exit, 2703 HCI1394_TNF_HAL_STACK, ""); 2704 return (DDI_FAILURE); 2705 } 2706 2707 /* Initialize the private HAL command structure */ 2708 hci1394_async_hcicmd_init(async_handle, cmd, cmd_priv, &hcicmd); 2709 hcicmd->ac_dest = (uint_t)(cmd->cmd_addr >> IEEE1394_ADDR_PHY_ID_SHIFT); 2710 2711 /* allocate a tlabel for this request */ 2712 status = hci1394_tlabel_alloc(async_handle->as_tlabel, hcicmd->ac_dest, 2713 &hcicmd->ac_tlabel); 2714 if (status != DDI_SUCCESS) { 2715 *result = H1394_STATUS_EMPTY_TLABEL; 2716 TNF_PROBE_0(hci1394_async_lock_tlb_fail, 2717 HCI1394_TNF_HAL_ERROR, ""); 2718 TNF_PROBE_0_DEBUG(hci1394_async_lock_exit, 2719 HCI1394_TNF_HAL_STACK, ""); 2720 return (DDI_FAILURE); 2721 } 2722 2723 /* Register this command w/ its tlabel */ 2724 hci1394_tlabel_register(async_handle->as_tlabel, &hcicmd->ac_tlabel, 2725 hcicmd); 2726 2727 /* 2728 * Setup the packet header information for a ATREQ lock packet. Set 2729 * the tcode up as a lock request. Set SRCBusId if this lock is not a 2730 * local bus access. Copy in the speed, tlabel, and destination 2731 * address. 2732 */ 2733 header.q1 = DESC_ATREQ_Q1_LCK; 2734 if ((hcicmd->ac_dest & IEEE1394_BUS_NUM_MASK) != 2735 IEEE1394_BUS_NUM_MASK) { 2736 header.q1 |= DESC_AT_SRCBUSID; 2737 } 2738 header.q1 |= HCI1394_DESC_AT_SPD_SET(cmd_priv->speed) | 2739 HCI1394_DESC_TLABEL_SET(hcicmd->ac_tlabel.tbi_tlabel); 2740 header.q2 = (uint32_t)(cmd->cmd_addr >> 32); 2741 header.q3 = (uint32_t)(cmd->cmd_addr & DESC_PKT_DESTOFFLO_MASK); 2742 2743 /* 2744 * Setup the lock length based on what size lock operation we are 2745 * performing. If it isn't a lock32 or lock64, we have encountered an 2746 * internal error. Copy the lock data into a local data buffer. Perform 2747 * a byte swap if it is an arithmetic lock operation and we are on a 2748 * little endian machine. 2749 */ 2750 if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_32) { 2751 size = DESC_TWO_QUADS; 2752 header.q4 = HCI1394_DESC_DATALEN_SET(size) | 2753 HCI1394_DESC_EXTTCODE_SET(cmd->cmd_u.l32.lock_type); 2754 data32[0] = HCI1394_ARITH_LOCK_SWAP32( 2755 cmd->cmd_u.l32.lock_type, cmd->cmd_u.l32.arg_value); 2756 data32[1] = HCI1394_ARITH_LOCK_SWAP32( 2757 cmd->cmd_u.l32.lock_type, cmd->cmd_u.l32.data_value); 2758 datap = (uint8_t *)data32; 2759 } else if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_64) { 2760 size = DESC_TWO_OCTLETS; 2761 header.q4 = HCI1394_DESC_DATALEN_SET(size) | 2762 HCI1394_DESC_EXTTCODE_SET(cmd->cmd_u.l64.lock_type); 2763 data64[0] = HCI1394_ARITH_LOCK_SWAP64( 2764 cmd->cmd_u.l64.lock_type, cmd->cmd_u.l64.arg_value); 2765 data64[1] = HCI1394_ARITH_LOCK_SWAP64( 2766 cmd->cmd_u.l64.lock_type, cmd->cmd_u.l64.data_value); 2767 datap = (uint8_t *)data64; 2768 } else { 2769 *result = H1394_STATUS_INTERNAL_ERROR; 2770 TNF_PROBE_0(hci1394_lock_length_fail, 2771 HCI1394_TNF_HAL_ERROR, ""); 2772 TNF_PROBE_0_DEBUG(hci1394_async_lock_exit, 2773 HCI1394_TNF_HAL_STACK, ""); 2774 return (DDI_FAILURE); 2775 } 2776 2777 /* Write request into the ATREQ Q. 
If we fail, we're out of space */ 2778 status = hci1394_q_at_with_data(async_handle->as_atreq_q, 2779 &hcicmd->ac_qcmd, &header, DESC_PKT_HDRLEN_AT_LOCK, datap, size, 2780 result); 2781 if (status != DDI_SUCCESS) { 2782 TNF_PROBE_0(hci1394_async_lock_q_fail, 2783 HCI1394_TNF_HAL_ERROR, ""); 2784 TNF_PROBE_0_DEBUG(hci1394_async_lock_exit, 2785 HCI1394_TNF_HAL_STACK, ""); 2786 return (DDI_FAILURE); 2787 } 2788 2789 TNF_PROBE_0_DEBUG(hci1394_async_lock_exit, HCI1394_TNF_HAL_STACK, ""); 2790 2791 return (DDI_SUCCESS); 2792 } 2793 2794 2795 /* 2796 * hci1394_async_write_response() 2797 * Send a write ATRESP. This routine should be called from the Services 2798 * layer to send a response to a received write request (ARREQ). The same 2799 * response is sent to a quadlet and block write request. 2800 */ 2801 int 2802 hci1394_async_write_response(hci1394_async_handle_t async_handle, 2803 cmd1394_cmd_t *cmd, h1394_cmd_priv_t *cmd_priv, int *result) 2804 { 2805 hci1394_basic_pkt_t header; 2806 int status; 2807 hci1394_async_cmd_t *hcicmd; 2808 2809 2810 ASSERT(async_handle != NULL); 2811 ASSERT(cmd != NULL); 2812 ASSERT(cmd_priv != NULL); 2813 ASSERT(result != NULL); 2814 2815 TNF_PROBE_0_DEBUG(hci1394_async_write_response_enter, 2816 HCI1394_TNF_HAL_STACK, ""); 2817 2818 /* 2819 * make sure this call is during the current bus generation (i.e. no 2820 * bus resets have occured since this request was made. 2821 */ 2822 if (cmd_priv->bus_generation != hci1394_ohci_current_busgen( 2823 async_handle->as_ohci)) { 2824 *result = H1394_STATUS_INVALID_BUSGEN; 2825 TNF_PROBE_0_DEBUG(hci1394_async_write_response_exit, 2826 HCI1394_TNF_HAL_STACK, ""); 2827 return (DDI_FAILURE); 2828 } 2829 2830 /* 2831 * setup a shortcut to the hal private command area. Copy the generation 2832 * to the Q area so that we can check the generation when the AT Q is 2833 * locked. This prevents us from loosing commands due to race 2834 * conditions. 2835 */ 2836 hcicmd = (hci1394_async_cmd_t *)cmd_priv->hal_overhead; 2837 hcicmd->ac_qcmd.qc_generation = cmd_priv->bus_generation; 2838 2839 /* 2840 * Setup the packet header information for a ATRESP write packet. Set 2841 * the tcode for a write response. Set SRCBusId if the addr is not a 2842 * local bus address. Copy in the speed, tlabel, and response code. 2843 */ 2844 header.q1 = DESC_ATRESP_Q1_WR; 2845 if ((cmd->nodeID & IEEE1394_BUS_NUM_MASK) != IEEE1394_BUS_NUM_MASK) { 2846 header.q1 |= DESC_AT_SRCBUSID; 2847 } 2848 header.q1 |= HCI1394_DESC_AT_SPD_SET(cmd_priv->speed) | 2849 HCI1394_DESC_TLABEL_SET(hcicmd->ac_tlabel.tbi_tlabel); 2850 header.q2 = (HCI1394_DESC_DESTID_SET(cmd->nodeID) | 2851 HCI1394_DESC_RCODE_SET(cmd->cmd_result)); 2852 header.q3 = 0; 2853 2854 /* Write response into the ATRESP Q. If we fail, we're out of space */ 2855 status = hci1394_q_at(async_handle->as_atresp_q, &hcicmd->ac_qcmd, 2856 &header, DESC_PKT_HDRLEN_AT_WRITE_RESP, result); 2857 if (status != DDI_SUCCESS) { 2858 TNF_PROBE_0(hci1394_async_write_response_q_fail, 2859 HCI1394_TNF_HAL_ERROR, ""); 2860 TNF_PROBE_0_DEBUG(hci1394_async_write_response_exit, 2861 HCI1394_TNF_HAL_STACK, ""); 2862 return (DDI_FAILURE); 2863 } 2864 2865 TNF_PROBE_0_DEBUG(hci1394_async_write_response_exit, 2866 HCI1394_TNF_HAL_STACK, ""); 2867 2868 return (DDI_SUCCESS); 2869 } 2870 2871 2872 /* 2873 * hci1394_async_read_response() 2874 * Send a read ATRESP. This routine should be called from the Services 2875 * layer to send a response to a received read request (ARREQ). 
The 2876 * response will differ between quadlet/block read requests. 2877 */ 2878 int 2879 hci1394_async_read_response(hci1394_async_handle_t async_handle, 2880 cmd1394_cmd_t *cmd, h1394_cmd_priv_t *cmd_priv, int *result) 2881 { 2882 hci1394_basic_pkt_t header; 2883 int status; 2884 hci1394_async_cmd_t *hcicmd; 2885 2886 2887 ASSERT(async_handle != NULL); 2888 ASSERT(cmd != NULL); 2889 ASSERT(cmd_priv != NULL); 2890 ASSERT(result != NULL); 2891 2892 TNF_PROBE_0_DEBUG(hci1394_async_read_response_enter, 2893 HCI1394_TNF_HAL_STACK, ""); 2894 2895 /* 2896 * make sure this call is during the current bus generation (i.e. no 2897 * bus resets have occured since this request was made. 2898 */ 2899 if (cmd_priv->bus_generation != hci1394_ohci_current_busgen( 2900 async_handle->as_ohci)) { 2901 *result = H1394_STATUS_INVALID_BUSGEN; 2902 TNF_PROBE_0_DEBUG(hci1394_async_read_response_exit, 2903 HCI1394_TNF_HAL_STACK, ""); 2904 return (DDI_FAILURE); 2905 } 2906 2907 /* 2908 * setup a shortcut to the hal private command area. Copy the generation 2909 * to the Q area so that we can check the generation when the AT Q is 2910 * locked. This prevents us from loosing commands due to race 2911 * conditions. 2912 */ 2913 hcicmd = (hci1394_async_cmd_t *)cmd_priv->hal_overhead; 2914 hcicmd->ac_qcmd.qc_generation = cmd_priv->bus_generation; 2915 2916 /* 2917 * Setup the packet header information for a ATRESP read packet. we 2918 * will set the tcode later based on type of read response. Set 2919 * SRCBusId if the addr is not a local bus address. Copy in the 2920 * speed, tlabel, and response code. 2921 */ 2922 header.q1 = 0; 2923 if ((cmd->nodeID & IEEE1394_BUS_NUM_MASK) != IEEE1394_BUS_NUM_MASK) { 2924 header.q1 |= DESC_AT_SRCBUSID; 2925 } 2926 header.q1 |= HCI1394_DESC_AT_SPD_SET(cmd_priv->speed) | 2927 HCI1394_DESC_TLABEL_SET(hcicmd->ac_tlabel.tbi_tlabel); 2928 header.q2 = (uint32_t)(HCI1394_DESC_DESTID_SET(cmd->nodeID) | 2929 HCI1394_DESC_RCODE_SET(cmd->cmd_result)); 2930 header.q3 = 0; 2931 2932 /* if the response is a read quadlet response */ 2933 if (cmd->cmd_type == CMD1394_ASYNCH_RD_QUAD) { 2934 /* 2935 * setup the tcode for a quadlet read response, If the 2936 * response code is not resp complete. 2937 */ 2938 header.q1 |= DESC_ATRESP_Q1_QRD; 2939 if (cmd->cmd_result == IEEE1394_RESP_COMPLETE) { 2940 header.q4 = cmd->cmd_u.q.quadlet_data; 2941 } else { 2942 header.q4 = 0x0; 2943 } 2944 2945 /* 2946 * Write response into the ATRESP Q. If we fail, we're out of 2947 * space. 2948 */ 2949 status = hci1394_q_at(async_handle->as_atresp_q, 2950 &hcicmd->ac_qcmd, &header, DESC_PKT_HDRLEN_AT_READQUAD_RESP, 2951 result); 2952 if (status != DDI_SUCCESS) { 2953 TNF_PROBE_0(hci1394_async_read_response_q_fail, 2954 HCI1394_TNF_HAL_ERROR, ""); 2955 TNF_PROBE_0_DEBUG(hci1394_async_read_response_exit, 2956 HCI1394_TNF_HAL_STACK, ""); 2957 return (DDI_FAILURE); 2958 } 2959 2960 /* 2961 * the response is a block read response. If the result is not a 2962 * resp complete, we are not going to send any data back. 2963 */ 2964 } else if ((cmd->cmd_type == CMD1394_ASYNCH_RD_BLOCK) && 2965 (cmd->cmd_result != IEEE1394_RESP_COMPLETE)) { 2966 /* 2967 * Setup the tcode for a block read response, set the data 2968 * length to zero since we had an error. 2969 */ 2970 header.q1 |= DESC_ATRESP_Q1_BRD; 2971 header.q4 = 0x0; 2972 2973 /* 2974 * Write response into the ATRESP Q. If we fail, we're out of 2975 * space. 
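 *
 * Since datalen was set to zero above, no payload follows the header and
 * the plain hci1394_q_at() entry point is enough here; the resp complete
 * path further down uses hci1394_q_at_with_mblk() to attach the read
 * data.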
2976 */ 2977 status = hci1394_q_at(async_handle->as_atresp_q, 2978 &hcicmd->ac_qcmd, &header, 2979 DESC_PKT_HDRLEN_AT_READBLOCK_RESP, result); 2980 if (status != DDI_SUCCESS) { 2981 TNF_PROBE_0(hci1394_async_read_response_qbf_fail, 2982 HCI1394_TNF_HAL_ERROR, ""); 2983 TNF_PROBE_0_DEBUG(hci1394_async_read_response_exit, 2984 HCI1394_TNF_HAL_STACK, ""); 2985 return (DDI_FAILURE); 2986 } 2987 2988 /* 2989 * the response is a block read response with a resp complete for the 2990 * response code. Send back the read data. 2991 */ 2992 } else { 2993 /* 2994 * Setup the tcode for a block read response, setup the data 2995 * length. 2996 */ 2997 header.q1 |= DESC_ATRESP_Q1_BRD; 2998 header.q4 = HCI1394_DESC_DATALEN_SET(cmd->cmd_u.b.blk_length); 2999 3000 /* 3001 * Write response into the ATRESP Q. If we fail, we're out of 3002 * space. Use the data in the mblk. 3003 */ 3004 status = hci1394_q_at_with_mblk(async_handle->as_atresp_q, 3005 &hcicmd->ac_qcmd, &header, 3006 DESC_PKT_HDRLEN_AT_READBLOCK_RESP, &cmd_priv->mblk, result); 3007 if (status != DDI_SUCCESS) { 3008 TNF_PROBE_0(hci1394_async_read_response_qb_fail, 3009 HCI1394_TNF_HAL_ERROR, ""); 3010 TNF_PROBE_0_DEBUG(hci1394_async_read_response_exit, 3011 HCI1394_TNF_HAL_STACK, ""); 3012 return (DDI_FAILURE); 3013 } 3014 } 3015 3016 TNF_PROBE_0_DEBUG(hci1394_async_read_response_exit, 3017 HCI1394_TNF_HAL_STACK, ""); 3018 3019 return (DDI_SUCCESS); 3020 } 3021 3022 3023 /* 3024 * hci1394_async_lock_response() 3025 * Send a lock ATRESP. This routine should be called from the Services 3026 * layer to send a response to a received lock request (ARREQ). The 3027 * response will differ between 32-bit/64-bit lock requests. 3028 */ 3029 int 3030 hci1394_async_lock_response(hci1394_async_handle_t async_handle, 3031 cmd1394_cmd_t *cmd, h1394_cmd_priv_t *cmd_priv, int *result) 3032 { 3033 hci1394_basic_pkt_t header; 3034 hci1394_async_cmd_t *hcicmd; 3035 uint32_t data32; 3036 uint64_t data64; 3037 uint8_t *datap; 3038 uint_t size; 3039 int status; 3040 3041 3042 ASSERT(async_handle != NULL); 3043 ASSERT(cmd != NULL); 3044 ASSERT(cmd_priv != NULL); 3045 ASSERT(result != NULL); 3046 3047 TNF_PROBE_0_DEBUG(hci1394_async_lock_response_enter, 3048 HCI1394_TNF_HAL_STACK, ""); 3049 3050 /* 3051 * make sure this call is during the current bus generation (i.e. no 3052 * bus resets have occured since this request was made. 3053 */ 3054 if (cmd_priv->bus_generation != hci1394_ohci_current_busgen( 3055 async_handle->as_ohci)) { 3056 *result = H1394_STATUS_INVALID_BUSGEN; 3057 TNF_PROBE_0_DEBUG(hci1394_async_lock_response_exit, 3058 HCI1394_TNF_HAL_STACK, ""); 3059 return (DDI_FAILURE); 3060 } 3061 3062 /* 3063 * setup a shortcut to the hal private command area. Copy the generation 3064 * to the Q area so that we can check the generation when the AT Q is 3065 * locked. This prevents us from loosing commands due to race 3066 * conditions. 3067 */ 3068 hcicmd = (hci1394_async_cmd_t *)cmd_priv->hal_overhead; 3069 hcicmd->ac_qcmd.qc_generation = cmd_priv->bus_generation; 3070 3071 /* 3072 * Setup the packet header information for a ATRESP lock packet. Set 3073 * the tcode for a lock response. Set SRCBusId if the addr is not a 3074 * local bus address. Copy in the speed, tlabel, and response code. 
3075 */ 3076 header.q1 = DESC_ATRESP_Q1_LCK; 3077 if ((cmd->nodeID & IEEE1394_BUS_NUM_MASK) != IEEE1394_BUS_NUM_MASK) { 3078 header.q1 |= DESC_AT_SRCBUSID; 3079 } 3080 header.q1 |= HCI1394_DESC_AT_SPD_SET(cmd_priv->speed) | 3081 HCI1394_DESC_TLABEL_SET(hcicmd->ac_tlabel.tbi_tlabel); 3082 header.q2 = (uint32_t)(HCI1394_DESC_DESTID_SET(cmd->nodeID) | 3083 HCI1394_DESC_RCODE_SET(cmd->cmd_result)); 3084 header.q3 = 0; 3085 3086 /* 3087 * If the lock result is not a resp complete, we are not going to send 3088 * any data back.with the response. 3089 */ 3090 if (cmd->cmd_result != IEEE1394_RESP_COMPLETE) { 3091 /* set response size to 0 for error. Set the extended tcode */ 3092 size = 0; 3093 if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_32) { 3094 header.q4 = HCI1394_DESC_DATALEN_SET(size) | 3095 HCI1394_DESC_EXTTCODE_SET(cmd->cmd_u.l32.lock_type); 3096 } else { 3097 header.q4 = HCI1394_DESC_DATALEN_SET(size) | 3098 HCI1394_DESC_EXTTCODE_SET(cmd->cmd_u.l64.lock_type); 3099 } 3100 3101 /* 3102 * Write response into the ATRESP Q. If we fail, we're out of 3103 * space. 3104 */ 3105 status = hci1394_q_at(async_handle->as_atresp_q, 3106 &hcicmd->ac_qcmd, &header, DESC_PKT_HDRLEN_AT_LOCK_RESP, 3107 result); 3108 if (status != DDI_SUCCESS) { 3109 TNF_PROBE_0(hci1394_q_alloc_fail, 3110 HCI1394_TNF_HAL_ERROR, ""); 3111 TNF_PROBE_0_DEBUG(hci1394_async_lock_response_exit, 3112 HCI1394_TNF_HAL_STACK, ""); 3113 return (DDI_FAILURE); 3114 } 3115 TNF_PROBE_0_DEBUG(hci1394_async_lock_response_exit, 3116 HCI1394_TNF_HAL_STACK, ""); 3117 return (DDI_SUCCESS); 3118 } 3119 3120 /* 3121 * if the lock result is resp complete, setup the size of the response 3122 * depending on the lock size and copy the lock response data into a 3123 * local buffer. If the lock response is an arithmetic operation, swap 3124 * the data on little endian machines. If we don't know what type of 3125 * lock operation it is, someone has corrupted the command since we 3126 * had received the ARREQ. 3127 */ 3128 if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_32) { 3129 size = IEEE1394_QUADLET; 3130 header.q4 = HCI1394_DESC_DATALEN_SET(size) | 3131 HCI1394_DESC_EXTTCODE_SET(cmd->cmd_u.l32.lock_type); 3132 data32 = HCI1394_ARITH_LOCK_SWAP32( 3133 cmd->cmd_u.l32.lock_type, cmd->cmd_u.l32.old_value); 3134 datap = (uint8_t *)&data32; 3135 } else if (cmd->cmd_type == CMD1394_ASYNCH_LOCK_64) { 3136 size = IEEE1394_OCTLET; 3137 header.q4 = HCI1394_DESC_DATALEN_SET(size) | 3138 HCI1394_DESC_EXTTCODE_SET(cmd->cmd_u.l64.lock_type); 3139 data64 = HCI1394_ARITH_LOCK_SWAP64( 3140 cmd->cmd_u.l64.lock_type, cmd->cmd_u.l64.old_value); 3141 datap = (uint8_t *)&data64; 3142 } else { 3143 *result = H1394_STATUS_INTERNAL_ERROR; 3144 TNF_PROBE_0(hci1394_lock_type_fail, HCI1394_TNF_HAL_ERROR, ""); 3145 TNF_PROBE_0_DEBUG(hci1394_async_lock_response_exit, 3146 HCI1394_TNF_HAL_STACK, ""); 3147 return (DDI_FAILURE); 3148 } 3149 3150 /* 3151 * Write response into the ATRESP Q. If we fail, we're out of space. 3152 * Use the local data buffer that we copied the data to above. 
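 *
 * Note that a lock ATRESP carries only old_value (one quadlet for a
 * lock32, one octlet for a lock64); the arg_value/data_value pair only
 * exists in the request.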
3153 */ 3154 status = hci1394_q_at_with_data(async_handle->as_atresp_q, 3155 &hcicmd->ac_qcmd, &header, DESC_PKT_HDRLEN_AT_LOCK_RESP, datap, 3156 size, result); 3157 if (status != DDI_SUCCESS) { 3158 TNF_PROBE_0(hci1394_q_alloc_fail, HCI1394_TNF_HAL_ERROR, ""); 3159 TNF_PROBE_0_DEBUG(hci1394_async_lock_response_exit, 3160 HCI1394_TNF_HAL_STACK, ""); 3161 return (DDI_FAILURE); 3162 } 3163 3164 TNF_PROBE_0_DEBUG(hci1394_async_lock_response_exit, 3165 HCI1394_TNF_HAL_STACK, ""); 3166 3167 return (DDI_SUCCESS); 3168 } 3169 3170 3171 /* 3172 * hci1394_async_response_complete() 3173 * Free up space allocted during an ARREQ. This is called when the target 3174 * driver and Services Layer are done with a command which was by the HAL 3175 * during ARREQ processing. This routine will also free up any allocated 3176 * mblks. 3177 * 3178 * NOTE: a target driver can hold on to a block write ARREQ mblk by setting 3179 * the mblk pointer to NULL. This ONLY applies to block write ARREQs. The 3180 * HAL will no longer track the mblk for this case. 3181 */ 3182 void 3183 hci1394_async_response_complete(hci1394_async_handle_t async_handle, 3184 cmd1394_cmd_t *cmd, h1394_cmd_priv_t *cmd_priv) 3185 { 3186 hci1394_async_cmd_t *hcicmd; 3187 3188 3189 ASSERT(async_handle != NULL); 3190 ASSERT(cmd != NULL); 3191 ASSERT(cmd_priv != NULL); 3192 3193 TNF_PROBE_0_DEBUG(hci1394_async_response_complete_enter, 3194 HCI1394_TNF_HAL_STACK, ""); 3195 3196 hcicmd = (hci1394_async_cmd_t *)cmd_priv->hal_overhead; 3197 3198 /* If we allocated an mblk for this command */ 3199 if (hcicmd->ac_mblk_alloc == B_TRUE) { 3200 /* 3201 * Don't free mblk if it is set to NULL. This allows a target 3202 * driver to hold on to it in the case of a block write ARREQ. 3203 */ 3204 if (cmd->cmd_u.b.data_block != NULL) { 3205 freeb(cmd->cmd_u.b.data_block); 3206 } 3207 } 3208 3209 /* free up the 1394 framework command */ 3210 (void) h1394_free_cmd((void *)async_handle->as_drvinfo->di_sl_private, 3211 &cmd); 3212 3213 TNF_PROBE_0_DEBUG(hci1394_async_response_complete_exit, 3214 HCI1394_TNF_HAL_STACK, ""); 3215 } 3216 3217 3218 /* 3219 * hci1394_async_pending_timeout() 3220 * This is the ARREQ Pending timeout callback routine. It is called from 3221 * the tlist code. There is a race condition with the ARRESP interrupt 3222 * handler (hci1394_async_arresp_process) which requires a mutex to 3223 * lock around the mark of the bad tlabel. 3224 * 3225 * Once we enter this routine, the command has timed out. If the command is 3226 * in both the ARRESP handler and here, we will consider it to have timed 3227 * out. That code path handles the race condition more easily. 3228 */ 3229 static void 3230 hci1394_async_pending_timeout(hci1394_tlist_node_t *node, void *arg) 3231 { 3232 hci1394_async_handle_t async_handle; 3233 hci1394_async_cmd_t *hcicmd; 3234 3235 3236 async_handle = (hci1394_async_handle_t)arg; 3237 ASSERT(async_handle != NULL); 3238 ASSERT(node != NULL); 3239 TNF_PROBE_0_DEBUG(hci1394_async_pending_timeout_enter, 3240 HCI1394_TNF_HAL_STACK, ""); 3241 3242 hcicmd = (hci1394_async_cmd_t *)node->tln_addr; 3243 3244 /* 3245 * We do NOT want to set the command state here. That should only be 3246 * done in the ISR. The state does nothing for us here. 3247 */ 3248 3249 /* 3250 * We want a lock around tlabel_lookup/reading data into the cmd in the 3251 * ARRESP ISR processing and a lock around the tlabel_bad in this 3252 * routine. This ensures that we will not be touching the command 3253 * structure after we pass it up to the Services Layer. 
	 * If we mark it as bad first, the lookup will fail. If we get to the
	 * lookup first, the pending list delete will fail in arresp_process(),
	 * which tells that code path that we are in the middle of doing the
	 * timeout processing for this command. The ARRESP logic will just
	 * drop the response and continue on.
	 */
	mutex_enter(&hcicmd->ac_async->as_atomic_lookup);
	hci1394_tlabel_bad(async_handle->as_tlabel, &hcicmd->ac_tlabel);
	mutex_exit(&hcicmd->ac_async->as_atomic_lookup);

	/* Tell the Services Layer that the command has timed out */
	h1394_cmd_is_complete(async_handle->as_drvinfo->di_sl_private,
	    hcicmd->ac_cmd, H1394_AT_REQ, H1394_CMD_ETIMEOUT);

	TNF_PROBE_0_DEBUG(hci1394_async_pending_timeout_exit,
	    HCI1394_TNF_HAL_STACK, "");
}


/*
 * hci1394_async_timeout_calc()
 *    Calculate the timeout for an ATRESP. When an ARREQ is received, this
 *    routine is called with the time the ARREQ was received. It returns the
 *    time when the ATRESP is considered to have timed out. We time out after
 *    split_timeout has gone by. Split timeout and the returned value are in
 *    bus cycles.
 */
static uint_t
hci1394_async_timeout_calc(hci1394_async_handle_t async_handle,
    uint_t current_time)
{
	uint_t split_timeout;
	uint_t temp;
	uint_t carry;
	uint_t z;


	TNF_PROBE_0_DEBUG(hci1394_async_timeout_calc_enter,
	    HCI1394_TNF_HAL_STACK, "");

	/* Get the current split timeout */
	split_timeout = hci1394_csr_split_timeout_get(async_handle->as_csr);

	/*
	 * The timestamp is broken up into two sections, the 3-bit seconds
	 * field and the 13-bit cycle count. The cycle count is in 125uS
	 * increments. The maximum value of the cycle count is 7999 (8000 is
	 * one second). With 13 bits, we could store up to 8191. Therefore,
	 * this is not a simple 16-bit addition; hence the code below. (For
	 * example, a cycle count sum of 8100 becomes a cycle count of 100
	 * plus a carry of one second.)
	 */

	/*
	 * Calculate the new cycle count based on the cycle count from the
	 * current time and the split timeout. If this new value is not
	 * greater than the maximum cycle count, we don't have a carry. Go to
	 * the next step.
	 */
	temp = (current_time & OHCI_CYCLE_CNT_MASK) + (split_timeout &
	    OHCI_CYCLE_CNT_MASK);
	if (temp < OHCI_MAX_CYCLE_CNT) {
		carry = 0;

	/*
	 * The new cycle count adds up to more than the maximum cycle count;
	 * set the carry state and adjust the total accordingly.
	 */
	} else {
		temp = temp - OHCI_MAX_CYCLE_CNT;
		carry = 1;
	}

	/*
	 * The timeout time equals the seconds added with the carry (1 or 0
	 * seconds), added with the adjusted (if necessary) cycle count.
	 * Mask the final value to get rid of any second rollovers.
	 */
	z = (current_time & OHCI_CYCLE_SEC_MASK) + (split_timeout &
	    OHCI_CYCLE_SEC_MASK) + (carry << OHCI_CYCLE_SEC_SHIFT) + temp;
	z = z & OHCI_TIMESTAMP_MASK;

	TNF_PROBE_0_DEBUG(hci1394_async_timeout_calc_exit,
	    HCI1394_TNF_HAL_STACK, "");

	return (z);
}


/*
 * hci1394_async_arresp_size_get()
 *    Return the size of the arresp that was received in q_handle at addr.
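 *    For example, under the formula below, a block read response carrying
 *    6 bytes of data is counted as DESC_PKT_HDRLEN_AT_READBLOCK_RESP plus
 *    HCI1394_ALIGN_QUAD(6) (8 bytes once padded out to a quadlet boundary)
 *    plus the additional IEEE1394_QUADLET counted for every response.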
 */
static int
hci1394_async_arresp_size_get(uint_t tcode, hci1394_q_handle_t q_handle,
    uint32_t *addr, uint_t *size)
{
	uint_t data_length;
	uint32_t quadlet;


	ASSERT(q_handle != NULL);
	ASSERT(addr != NULL);
	ASSERT(size != NULL);

	TNF_PROBE_0_DEBUG(hci1394_get_arresp_size_enter,
	    HCI1394_TNF_HAL_STACK, "");

	if (tcode == IEEE1394_TCODE_WRITE_RESP) {
		*size = DESC_PKT_HDRLEN_AT_WRITE_RESP + IEEE1394_QUADLET;
	} else if (tcode == IEEE1394_TCODE_READ_QUADLET_RESP) {
		*size = DESC_PKT_HDRLEN_AT_READQUAD_RESP + IEEE1394_QUADLET;
	} else if (tcode == IEEE1394_TCODE_READ_BLOCK_RESP) {
		quadlet = hci1394_q_ar_get32(q_handle, &addr[3]);
		data_length = HCI1394_DESC_DATALEN_GET(quadlet);
		/*
		 * The response size is in quadlets, therefore we need to
		 * account for the padding when figuring out the size used
		 * up for this response.
		 */
		*size = DESC_PKT_HDRLEN_AT_READBLOCK_RESP +
		    HCI1394_ALIGN_QUAD(data_length) + IEEE1394_QUADLET;
	} else if (tcode == IEEE1394_TCODE_LOCK_RESP) {
		quadlet = hci1394_q_ar_get32(q_handle, &addr[3]);
		data_length = HCI1394_DESC_DATALEN_GET(quadlet);
		/*
		 * The response size is in quadlets, therefore we need to
		 * account for the padding when figuring out the size used
		 * up for this response.
		 */
		*size = DESC_PKT_HDRLEN_AT_LOCK_RESP +
		    HCI1394_ALIGN_QUAD(data_length) + IEEE1394_QUADLET;
	} else {
		TNF_PROBE_1(hci1394_async_arresp_size_tcode_err,
		    HCI1394_TNF_HAL_ERROR,
		    "unknown ARRESP received", tnf_uint, arresp_tcode, tcode);
		TNF_PROBE_0_DEBUG(hci1394_get_arresp_size_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_FAILURE);
	}

	TNF_PROBE_0_DEBUG(hci1394_get_arresp_size_exit,
	    HCI1394_TNF_HAL_STACK, "");

	return (DDI_SUCCESS);
}


/*
 * hci1394_async_pending_list_flush()
 *    Flush out the ATREQ pending list. All commands still on the ATREQ
 *    pending list are considered to be completed due to a bus reset. The
 *    ATREQ and ARRESP Q's should be flushed before the pending Q is flushed.
 *    The ATREQ could have more ACK pendings and the ARRESP could have valid
 *    responses to pended requests.
 */
void
hci1394_async_pending_list_flush(hci1394_async_handle_t async_handle)
{
	hci1394_tlist_node_t *node;
	hci1394_async_cmd_t *hcicmd;


	ASSERT(async_handle != NULL);

	TNF_PROBE_0_DEBUG(hci1394_async_pending_list_flush_enter,
	    HCI1394_TNF_HAL_STACK, "");

	do {
		/*
		 * Get the first node on the pending list. This routine also
		 * removes the node from the list.
		 */
		hci1394_tlist_get(async_handle->as_pending_list, &node);
		if (node != NULL) {
			/* set the command state to completed */
			hcicmd = (hci1394_async_cmd_t *)node->tln_addr;
			hcicmd->ac_state = HCI1394_CMD_STATE_COMPLETED;

			/*
			 * Send the command up to the Services Layer with
			 * completed due to the bus reset for status.
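			 * The status used is H1394_CMD_EBUSRESET; the command
			 * state was already set to HCI1394_CMD_STATE_COMPLETED
			 * just above.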
			 */
			h1394_cmd_is_complete(
			    async_handle->as_drvinfo->di_sl_private,
			    hcicmd->ac_cmd, H1394_AT_REQ,
			    H1394_CMD_EBUSRESET);
		}
	} while (node != NULL);

	TNF_PROBE_0_DEBUG(hci1394_async_pending_list_flush_exit,
	    HCI1394_TNF_HAL_STACK, "");
}


/*
 * hci1394_async_atreq_start()
 *    Set up the command pointer for the first descriptor to be fetched and
 *    then set the run bit. This routine will be called the first time
 *    a descriptor is added to the Q.
 */
static void
hci1394_async_atreq_start(void *async, uint32_t command_ptr)
{
	hci1394_async_handle_t async_handle;
	ASSERT(async != NULL);
	TNF_PROBE_0_DEBUG(hci1394_async_atreq_start_enter,
	    HCI1394_TNF_HAL_STACK, "");
	async_handle = (hci1394_async_handle_t)async;
	hci1394_ohci_atreq_start(async_handle->as_ohci, command_ptr);
	TNF_PROBE_0_DEBUG(hci1394_async_atreq_start_exit,
	    HCI1394_TNF_HAL_STACK, "");
}


/*
 * hci1394_async_atreq_wake()
 *    Set the wake bit for the ATREQ DMA engine. This routine will be called
 *    from the Q logic after placing a descriptor on the Q.
 */
static void
hci1394_async_atreq_wake(void *async)
{
	hci1394_async_handle_t async_handle;
	ASSERT(async != NULL);
	TNF_PROBE_0_DEBUG(hci1394_async_atreq_wake_enter,
	    HCI1394_TNF_HAL_STACK, "");
	async_handle = (hci1394_async_handle_t)async;
	hci1394_ohci_atreq_wake(async_handle->as_ohci);
	TNF_PROBE_0_DEBUG(hci1394_async_atreq_wake_exit,
	    HCI1394_TNF_HAL_STACK, "");
}


/*
 * hci1394_async_atreq_reset()
 *    Reset the atreq Q. The AT DMA engines must be stopped on every bus
 *    reset. They will restart when the next descriptor is added to the Q.
 *    We will stop the DMA engine and then notify the Q logic that it has
 *    been stopped so it knows to do a start next time it puts a descriptor
 *    on the Q.
 */
void
hci1394_async_atreq_reset(hci1394_async_handle_t async_handle)
{
	ASSERT(async_handle != NULL);
	TNF_PROBE_0_DEBUG(hci1394_async_atreq_reset_enter,
	    HCI1394_TNF_HAL_STACK, "");
	hci1394_ohci_atreq_stop(async_handle->as_ohci);
	hci1394_q_stop(async_handle->as_atreq_q);
	TNF_PROBE_0_DEBUG(hci1394_async_atreq_reset_exit,
	    HCI1394_TNF_HAL_STACK, "");
}


/*
 * hci1394_async_atreq_flush()
 *    Flush out the atreq Q. This routine is called during bus reset
 *    processing. It should be called before arresp_flush() and
 *    pending_list_flush().
 */
static void
hci1394_async_atreq_flush(hci1394_async_handle_t async_handle)
{
	boolean_t request_available;
	int status;

	ASSERT(async_handle != NULL);

	TNF_PROBE_0_DEBUG(hci1394_async_atreq_flush_enter,
	    HCI1394_TNF_HAL_STACK, "");

	/* Clear reqTxComplete interrupt */
	hci1394_ohci_intr_clear(async_handle->as_ohci, OHCI_INTR_REQ_TX_CMPLT);

	/*
	 * Process all Q'd AT requests. If a request is pended, it is
	 * considered complete relative to the atreq engine;
	 * pending_list_flush() will finish up the required processing for
	 * pended requests.
	 */
	do {
		/*
		 * Flush the atreq Q.
		 * Process all Q'd commands.
		 */
		status = hci1394_async_atreq_process(async_handle,
		    B_TRUE, &request_available);
		if (status != DDI_SUCCESS) {
			TNF_PROBE_0(hci1394_async_atreq_process_fail,
			    HCI1394_TNF_HAL_ERROR, "");
		}
	} while (request_available == B_TRUE);

	TNF_PROBE_0_DEBUG(hci1394_async_atreq_flush_exit,
	    HCI1394_TNF_HAL_STACK, "");
}


/*
 * hci1394_async_arresp_start()
 *    Set up the command pointer for the first descriptor to be fetched and
 *    then set the run bit. This routine will be called the first time
 *    a descriptor is added to the Q.
 */
static void
hci1394_async_arresp_start(void *async, uint32_t command_ptr)
{
	hci1394_async_handle_t async_handle;
	ASSERT(async != NULL);
	TNF_PROBE_0_DEBUG(hci1394_async_arresp_start_enter,
	    HCI1394_TNF_HAL_STACK, "");
	async_handle = (hci1394_async_handle_t)async;
	hci1394_ohci_arresp_start(async_handle->as_ohci, command_ptr);
	TNF_PROBE_0_DEBUG(hci1394_async_arresp_start_exit,
	    HCI1394_TNF_HAL_STACK, "");
}


/*
 * hci1394_async_arresp_wake()
 *    Set the wake bit for the ARRESP DMA engine. This routine will be called
 *    from the Q logic after placing a descriptor on the Q.
 */
static void
hci1394_async_arresp_wake(void *async)
{
	hci1394_async_handle_t async_handle;
	ASSERT(async != NULL);
	TNF_PROBE_0_DEBUG(hci1394_async_arresp_wake_enter,
	    HCI1394_TNF_HAL_STACK, "");
	async_handle = (hci1394_async_handle_t)async;
	hci1394_ohci_arresp_wake(async_handle->as_ohci);
	TNF_PROBE_0_DEBUG(hci1394_async_arresp_wake_exit,
	    HCI1394_TNF_HAL_STACK, "");
}


/*
 * hci1394_async_arresp_flush()
 *    Flush out the arresp Q. This routine is called during bus reset
 *    processing. It should be called before pending_list_flush(). All
 *    received responses will be processed normally. The tlabels should
 *    not be reset until after the ARRESP Q has been flushed; otherwise
 *    we would reject valid responses.
 */
static void
hci1394_async_arresp_flush(hci1394_async_handle_t async_handle)
{
	boolean_t response_available;
	int status;


	ASSERT(async_handle != NULL);

	TNF_PROBE_0_DEBUG(hci1394_async_arresp_flush_enter,
	    HCI1394_TNF_HAL_STACK, "");

	/* Clear the RSPkt (ARRESP received) interrupt */
	hci1394_ohci_intr_clear(async_handle->as_ohci, OHCI_INTR_RSPKT);

	do {
		/* Flush the arresp Q. Process all received commands */
		status = hci1394_async_arresp_process(async_handle,
		    &response_available);
		if (status != DDI_SUCCESS) {
			TNF_PROBE_0(hci1394_async_arresp_process_fail,
			    HCI1394_TNF_HAL_ERROR, "");
		}
	} while (response_available == B_TRUE);

	TNF_PROBE_0_DEBUG(hci1394_async_arresp_flush_exit,
	    HCI1394_TNF_HAL_STACK, "");
}


/*
 * hci1394_async_arreq_start()
 *    Set up the command pointer for the first descriptor to be fetched and
 *    then set the run bit. This routine will be called the first time
 *    a descriptor is added to the Q.
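 *    (Subsequent descriptors only need the wake bit set; see
 *    hci1394_async_arreq_wake() below.)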
 */
static void
hci1394_async_arreq_start(void *async, uint32_t command_ptr)
{
	hci1394_async_handle_t async_handle;
	ASSERT(async != NULL);
	TNF_PROBE_0_DEBUG(hci1394_async_arreq_start_enter,
	    HCI1394_TNF_HAL_STACK, "");
	async_handle = (hci1394_async_handle_t)async;
	hci1394_ohci_arreq_start(async_handle->as_ohci, command_ptr);
	TNF_PROBE_0_DEBUG(hci1394_async_arreq_start_exit,
	    HCI1394_TNF_HAL_STACK, "");
}


/*
 * hci1394_async_arreq_wake()
 *    Set the wake bit for the ARREQ DMA engine. This routine will be called
 *    from the Q logic after placing a descriptor on the Q.
 */
static void
hci1394_async_arreq_wake(void *async)
{
	hci1394_async_handle_t async_handle;
	ASSERT(async != NULL);
	TNF_PROBE_0_DEBUG(hci1394_async_arreq_wake_enter,
	    HCI1394_TNF_HAL_STACK, "");
	async_handle = (hci1394_async_handle_t)async;
	hci1394_ohci_arreq_wake(async_handle->as_ohci);
	TNF_PROBE_0_DEBUG(hci1394_async_arreq_wake_exit,
	    HCI1394_TNF_HAL_STACK, "");
}


/*
 * hci1394_async_arreq_flush()
 *    Flush the ARREQ Q. This will flush up to the bus reset token in the
 *    ARREQ. There is no order dependency for when this routine should get
 *    called (relative to the other Q flushing routines).
 */
static void
hci1394_async_arreq_flush(hci1394_async_handle_t async_handle)
{
	boolean_t request_available;
	int status;


	ASSERT(async_handle != NULL);
	TNF_PROBE_0_DEBUG(hci1394_async_arreq_flush_enter,
	    HCI1394_TNF_HAL_STACK, "");

	/*
	 * If the last bus reset token we have seen in
	 * hci1394_async_arreq_read_phy() matches the current generation, the
	 * ARREQ is already flushed. We have nothing further to do here so
	 * return. This can happen if we are processing ARREQ's and a bus
	 * reset occurs. Since we are already in the ISR, we will see the
	 * token before the bus reset handler gets to run.
	 */
	if (async_handle->as_phy_reset == hci1394_ohci_current_busgen(
	    async_handle->as_ohci)) {
		TNF_PROBE_0_DEBUG(hci1394_async_arreq_flush_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return;
	}

	/*
	 * Set the flag to tell hci1394_async_arreq_process() that we should
	 * not pass ARREQ's up to the Services Layer. This will be set to
	 * B_FALSE in hci1394_async_arreq_read_phy() when a bus reset token
	 * matching the current generation is found.
	 */
	async_handle->as_flushing_arreq = B_TRUE;

	/*
	 * Process all requests that have been received, or until we find the
	 * correct bus reset token.
	 */
	do {
		status = hci1394_async_arreq_process(async_handle,
		    &request_available);
		if (status != DDI_SUCCESS) {
			TNF_PROBE_0(hci1394_isr_arreq_pr_fail,
			    HCI1394_TNF_HAL_ERROR, "");
		}
	} while ((request_available == B_TRUE) &&
	    (async_handle->as_flushing_arreq == B_TRUE));

	/*
	 * Clear the asserted interrupt if there are no more ARREQ's to
	 * process. We could have ARREQ's in the Q after the bus reset token
	 * since we will set as_flushing_arreq to B_FALSE when we see the
	 * correct bus reset token in hci1394_async_arreq_read_phy(). If there
	 * are more ARREQ's, we will process them later, after finishing the
	 * rest of the bus reset processing. That is why we leave the
	 * interrupt asserted.
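	 * (The asserted interrupt here is OHCI_INTR_RQPKT; it is only cleared
	 * below when the ARREQ Q has been completely drained.)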
	 */
	if (request_available == B_FALSE) {
		hci1394_ohci_intr_clear(async_handle->as_ohci, OHCI_INTR_RQPKT);
	}

	TNF_PROBE_0_DEBUG(hci1394_async_arreq_flush_exit,
	    HCI1394_TNF_HAL_STACK, "");
}


/*
 * hci1394_async_atresp_start()
 *    Set up the command pointer for the first descriptor to be fetched and
 *    then set the run bit. This routine will be called the first time
 *    a descriptor is added to the Q.
 */
static void
hci1394_async_atresp_start(void *async, uint32_t command_ptr)
{
	hci1394_async_handle_t async_handle;
	ASSERT(async != NULL);
	TNF_PROBE_0_DEBUG(hci1394_async_atresp_start_enter,
	    HCI1394_TNF_HAL_STACK, "");
	async_handle = (hci1394_async_handle_t)async;
	hci1394_ohci_atresp_start(async_handle->as_ohci, command_ptr);
	TNF_PROBE_0_DEBUG(hci1394_async_atresp_start_exit,
	    HCI1394_TNF_HAL_STACK, "");
}


/*
 * hci1394_async_atresp_wake()
 *    Set the wake bit for the ATRESP DMA engine. This routine will be called
 *    from the Q logic after placing a descriptor on the Q.
 */
static void
hci1394_async_atresp_wake(void *async)
{
	hci1394_async_handle_t async_handle;
	ASSERT(async != NULL);
	TNF_PROBE_0_DEBUG(hci1394_async_atresp_wake_enter,
	    HCI1394_TNF_HAL_STACK, "");
	async_handle = (hci1394_async_handle_t)async;
	hci1394_ohci_atresp_wake(async_handle->as_ohci);
	TNF_PROBE_0_DEBUG(hci1394_async_atresp_wake_exit,
	    HCI1394_TNF_HAL_STACK, "");
}


/*
 * hci1394_async_atresp_reset()
 *    Reset the atresp Q. The AT DMA engines must be stopped on every bus
 *    reset. They will restart when the next descriptor is added to the Q.
 *    We will stop the DMA engine and then notify the Q logic that it has
 *    been stopped so it knows to do a start next time it puts a descriptor
 *    on the Q.
 */
void
hci1394_async_atresp_reset(hci1394_async_handle_t async_handle)
{
	ASSERT(async_handle != NULL);
	TNF_PROBE_0_DEBUG(hci1394_async_atresp_reset_enter,
	    HCI1394_TNF_HAL_STACK, "");
	hci1394_ohci_atresp_stop(async_handle->as_ohci);
	hci1394_q_stop(async_handle->as_atresp_q);
	TNF_PROBE_0_DEBUG(hci1394_async_atresp_reset_exit,
	    HCI1394_TNF_HAL_STACK, "");
}


/*
 * hci1394_async_atresp_flush()
 *    Flush all commands out of the atresp Q. This routine will be called
 *    during bus reset processing. There is no order dependency for when this
 *    routine should get called (relative to the other Q flushing routines).
 */
static void
hci1394_async_atresp_flush(hci1394_async_handle_t async_handle)
{
	boolean_t response_available;
	int status;

	ASSERT(async_handle != NULL);

	TNF_PROBE_0_DEBUG(hci1394_async_atresp_flush_enter,
	    HCI1394_TNF_HAL_STACK, "");

	/* Clear respTxComplete interrupt */
	hci1394_ohci_intr_clear(async_handle->as_ohci, OHCI_INTR_RESP_TX_CMPLT);

	/* Process all AT responses */
	do {
		/*
		 * Flush the atresp Q.
		 * Process all Q'd commands.
		 */
		status = hci1394_async_atresp_process(async_handle,
		    B_TRUE, &response_available);
		if (status != DDI_SUCCESS) {
			TNF_PROBE_0(hci1394_async_atresp_process_fail,
			    HCI1394_TNF_HAL_ERROR, "");
		}
	} while (response_available == B_TRUE);

	TNF_PROBE_0_DEBUG(hci1394_async_atresp_flush_exit,
	    HCI1394_TNF_HAL_STACK, "");
}

/*
 * hci1394_async_hcicmd_init()
 *    Initialize the private HAL command structure. This should be called from
 *    ATREQ and ARREQ routines.
 */
static void
hci1394_async_hcicmd_init(hci1394_async_handle_t async_handle,
    cmd1394_cmd_t *cmd, h1394_cmd_priv_t *cmd_priv,
    hci1394_async_cmd_t **hcicmd)
{
	*hcicmd = (hci1394_async_cmd_t *)cmd_priv->hal_overhead;
	(*hcicmd)->ac_cmd = cmd;
	(*hcicmd)->ac_priv = cmd_priv;
	(*hcicmd)->ac_async = async_handle;
	(*hcicmd)->ac_state = HCI1394_CMD_STATE_IN_PROGRESS;
	(*hcicmd)->ac_dest = 0;
	(*hcicmd)->ac_tlabel_alloc = B_TRUE;
	(*hcicmd)->ac_tlabel.tbi_tlabel = 0;
	(*hcicmd)->ac_tlabel.tbi_destination = 0;
	(*hcicmd)->ac_status = 0;
	(*hcicmd)->ac_qcmd.qc_timestamp = 0;
	(*hcicmd)->ac_qcmd.qc_arg = *hcicmd;
	(*hcicmd)->ac_qcmd.qc_generation = cmd_priv->bus_generation;
	(*hcicmd)->ac_mblk_alloc = B_FALSE;
}
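
#if 0
/*
 * Illustrative sketch only (never compiled into the driver): one way a
 * hypothetical target driver could hold on to the mblk from a block write
 * ARREQ, per the NOTE above hci1394_async_response_complete(). The handler
 * name and the queuing step are made up for illustration; only the
 * data_block handling follows the rule described in that NOTE.
 */
static void
xx_target_bwrite_request(cmd1394_cmd_t *req)
{
	mblk_t *mp;

	/* claim the data; the HAL skips freeb() when the pointer is NULL */
	mp = req->cmd_u.b.data_block;
	req->cmd_u.b.data_block = NULL;

	/*
	 * ... queue mp for later processing; the target driver now owns the
	 * mblk and is responsible for freeing it ...
	 */
}
#endif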