// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************

    AudioScience HPI driver
    Copyright (C) 1997-2014  AudioScience Inc. <support@audioscience.com>


Extended Message Function With Response Caching

(C) Copyright AudioScience Inc. 2002
*****************************************************************************/
#define SOURCEFILE_NAME "hpimsgx.c"
#include "hpi_internal.h"
#include "hpi_version.h"
#include "hpimsginit.h"
#include "hpicmn.h"
#include "hpimsgx.h"
#include "hpidebug.h"

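/*
 * PCI ID table for the supported AudioScience adapters. The entries are
 * generated by including hpipcida.h; the driver_data field of each entry
 * carries the HPI entry point function for that adapter family (see
 * hpi_lookup_entry_point_function() below).
 */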
static const struct pci_device_id asihpi_pci_tbl[] = {
#include "hpipcida.h"
};

static struct hpios_spinlock msgx_lock;

static hpi_handler_func *hpi_entry_points[HPI_MAX_ADAPTERS];
static int logging_enabled = 1;

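/*
 * Match the adapter's PCI IDs against asihpi_pci_tbl and return the
 * corresponding HPI entry point function, or NULL if the adapter is not
 * recognised.
 */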
static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
	*pci_info)
{

	int i;

	for (i = 0; asihpi_pci_tbl[i].vendor != 0; i++) {
		if (asihpi_pci_tbl[i].vendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].vendor !=
			pci_info->pci_dev->vendor)
			continue;
		if (asihpi_pci_tbl[i].device != PCI_ANY_ID
			&& asihpi_pci_tbl[i].device !=
			pci_info->pci_dev->device)
			continue;
		if (asihpi_pci_tbl[i].subvendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subvendor !=
			pci_info->pci_dev->subsystem_vendor)
			continue;
		if (asihpi_pci_tbl[i].subdevice != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subdevice !=
			pci_info->pci_dev->subsystem_device)
			continue;

		/* HPI_DEBUG_LOG(DEBUG, " %x,%lx\n", i,
		   asihpi_pci_tbl[i].driver_data); */
		return (hpi_handler_func *) asihpi_pci_tbl[i].driver_data;
	}

	return NULL;
}

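/*
 * Forward a message to the adapter-specific HPI handler registered for
 * phm->adapter_index, or fill in an HPI_ERROR_PROCESSING_MESSAGE response
 * if no handler is registered for that index.
 */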
static inline void hw_entry_point(struct hpi_message *phm,
	struct hpi_response *phr)
{
	if ((phm->adapter_index < HPI_MAX_ADAPTERS)
		&& hpi_entry_points[phm->adapter_index])
		hpi_entry_points[phm->adapter_index] (phm, phr);
	else
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_PROCESSING_MESSAGE);
}

static void adapter_open(struct hpi_message *phm, struct hpi_response *phr);
static void adapter_close(struct hpi_message *phm, struct hpi_response *phr);

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr);
static void mixer_close(struct hpi_message *phm, struct hpi_response *phr);

static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);

static void HPIMSGX__reset(u16 adapter_index);

static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr);
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner);

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(push, 1)
#endif

struct hpi_adapter_response {
	struct hpi_response_header h;
	struct hpi_adapter_res a;
};

struct hpi_mixer_response {
	struct hpi_response_header h;
	struct hpi_mixer_res m;
};

struct hpi_stream_response {
	struct hpi_response_header h;
	struct hpi_stream_res d;
};

struct adapter_info {
	u16 type;
	u16 num_instreams;
	u16 num_outstreams;
};

struct asi_open_state {
	int open_flag;
	void *h_owner;
};

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(pop)
#endif

/* Globals */
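/*
 * Cached "open" responses. adapter_prepare() opens the adapter, mixer and
 * every stream once at adapter creation time and stores the responses here;
 * later open requests from user mode are answered from this cache instead
 * of being sent to the adapter again.
 */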
static struct hpi_adapter_response rESP_HPI_ADAPTER_OPEN[HPI_MAX_ADAPTERS];

static struct hpi_stream_response
	rESP_HPI_OSTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_stream_response
	rESP_HPI_ISTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_mixer_response rESP_HPI_MIXER_OPEN[HPI_MAX_ADAPTERS];

static struct adapter_info aDAPTER_INFO[HPI_MAX_ADAPTERS];

/* use these to keep track of opens from user mode apps/DLLs */
static struct asi_open_state
	outstream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct asi_open_state
	instream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

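/*
 * Handle HPI_OBJ_SUBSYSTEM messages. Most of these are answered locally or
 * passed to HPI_COMMON(); HPI_SUBSYS_CREATE_ADAPTER is routed to
 * HPIMSGX__init() so the new adapter's entry point and cached open
 * responses get set up.
 */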
static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	if (phm->adapter_index != HPI_ADAPTER_INDEX_INVALID)
		HPI_DEBUG_LOG(WARNING,
			"suspicious adapter index %d in subsys message 0x%x.\n",
			phm->adapter_index, phm->function);

	switch (phm->function) {
	case HPI_SUBSYS_GET_VERSION:
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_GET_VERSION, 0);
		phr->u.s.version = HPI_VER >> 8;	/* return major.minor */
		phr->u.s.data = HPI_VER;	/* return major.minor.release */
		break;
	case HPI_SUBSYS_OPEN:
		/*do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_OPEN, 0);
		break;
	case HPI_SUBSYS_CLOSE:
		/*do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CLOSE,
			0);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		break;
	case HPI_SUBSYS_DRIVER_LOAD:
		/* Initialize this module's internal state */
		hpios_msgxlock_init(&msgx_lock);
		memset(&hpi_entry_points, 0, sizeof(hpi_entry_points));
		/* Init subsys_findadapters response to no-adapters */
		HPIMSGX__reset(HPIMSGX_ALLADAPTERS);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_LOAD, 0);
		/* individual HPIs don't implement driver load */
		HPI_COMMON(phm, phr);
		break;
	case HPI_SUBSYS_DRIVER_UNLOAD:
		HPI_COMMON(phm, phr);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_UNLOAD, 0);
		return;

	case HPI_SUBSYS_GET_NUM_ADAPTERS:
	case HPI_SUBSYS_GET_ADAPTER:
		HPI_COMMON(phm, phr);
		break;

	case HPI_SUBSYS_CREATE_ADAPTER:
		HPIMSGX__init(phm, phr);
		break;

	default:
		/* Must explicitly handle every subsys message in this switch */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, phm->function,
			HPI_ERROR_INVALID_FUNC);
		break;
	}
}

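/*
 * Handle HPI_OBJ_ADAPTER messages. Open/close are answered locally;
 * HPI_ADAPTER_DELETE first releases any streams still held by this owner
 * and sends an adapter close before passing the delete on to the hardware
 * handler.
 */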
static void adapter_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	switch (phm->function) {
	case HPI_ADAPTER_OPEN:
		adapter_open(phm, phr);
		break;
	case HPI_ADAPTER_CLOSE:
		adapter_close(phm, phr);
		break;
	case HPI_ADAPTER_DELETE:
		HPIMSGX__cleanup(phm->adapter_index, h_owner);
		{
			struct hpi_message hm;
			struct hpi_response hr;
			hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_CLOSE);
			hm.adapter_index = phm->adapter_index;
			hw_entry_point(&hm, &hr);
		}
		hw_entry_point(phm, phr);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}
}

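/*
 * Handle HPI_OBJ_MIXER messages: open/close are intercepted, everything
 * else goes straight to the adapter's entry point.
 */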
static void mixer_message(struct hpi_message *phm, struct hpi_response *phr)
{
	switch (phm->function) {
	case HPI_MIXER_OPEN:
		mixer_open(phm, phr);
		break;
	case HPI_MIXER_CLOSE:
		mixer_close(phm, phr);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

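/*
 * Handle out/in stream messages. The stream index is first checked against
 * the stream count reported by the adapter; open and close are intercepted
 * to maintain the per-owner open state, all other functions are forwarded
 * to the adapter's entry point.
 */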
static void outstream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_outstreams) {
		hpi_init_response(phr, HPI_OBJ_OSTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_OSTREAM_OPEN:
		outstream_open(phm, phr, h_owner);
		break;
	case HPI_OSTREAM_CLOSE:
		outstream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

static void instream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_instreams) {
		hpi_init_response(phr, HPI_OBJ_ISTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_ISTREAM_OPEN:
		instream_open(phm, phr, h_owner);
		break;
	case HPI_ISTREAM_CLOSE:
		instream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

/* NOTE: HPI_Message() must be defined in the driver as a wrapper for
 * HPI_MessageEx so that functions in hpifunc.c compile.
 */
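/*
 * Main message dispatcher. Every message is validated and routed by object
 * type; if a response carries an error code of HPI_ERROR_DSP_COMMUNICATION
 * or higher, message/response logging is disabled and the debug level is
 * reduced to errors only.
 */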
void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	if (logging_enabled)
		HPI_DEBUG_MESSAGE(DEBUG, phm);

	if (phm->type != HPI_TYPE_REQUEST) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_INVALID_TYPE);
		return;
	}

	if (phm->adapter_index >= HPI_MAX_ADAPTERS
		&& phm->adapter_index != HPIMSGX_ALLADAPTERS) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_BAD_ADAPTER_NUMBER);
		return;
	}

	switch (phm->object) {
	case HPI_OBJ_SUBSYSTEM:
		subsys_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ADAPTER:
		adapter_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_MIXER:
		mixer_message(phm, phr);
		break;

	case HPI_OBJ_OSTREAM:
		outstream_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ISTREAM:
		instream_message(phm, phr, h_owner);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}

	if (logging_enabled)
		HPI_DEBUG_RESPONSE(phr);

	if (phr->error >= HPI_ERROR_DSP_COMMUNICATION) {
		hpi_debug_level_set(HPI_DEBUG_LEVEL_ERROR);
		logging_enabled = 0;
	}
}

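/*
 * Adapter and mixer open/close never reach the hardware here: open returns
 * the response cached by adapter_prepare(), close simply returns success.
 */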
static void adapter_open(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_open\n");
	memcpy(phr, &rESP_HPI_ADAPTER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
}

static void adapter_close(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_close\n");
	hpi_init_response(phr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE, 0);
}

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr)
{
	memcpy(phr, &rESP_HPI_MIXER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_MIXER_OPEN[0]));
}

static void mixer_close(struct hpi_message *phm, struct hpi_response *phr)
{
	hpi_init_response(phr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE, 0);
}

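/*
 * Open an input stream on behalf of h_owner. The stream is only marked open
 * if it is not already owned, the cached open response reported no error,
 * and a reset sent to the hardware succeeds; on success the cached open
 * response is returned to the caller.
 */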
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (instream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_ISTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
	else {
		instream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

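/*
 * Close an input stream. Only the owner recorded at open time may close it;
 * the close is carried out as a hardware reset of the stream, and ownership
 * is restored if that reset fails.
 */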
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);
	if (h_owner ==
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
		   "instream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d instream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

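/* outstream_open()/outstream_close() mirror instream_open()/instream_close()
 * above, operating on the output stream tables.
 */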
static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (outstream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_OSTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
	else {
		outstream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_unlock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{

	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (h_owner ==
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
		   "outstream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_unlock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d outstream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_unlock(&msgx_lock);
}

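/*
 * Called after an adapter has been created: open the adapter, query its
 * stream counts, then open every stream and the mixer once, caching all of
 * the responses so that later opens can be answered from the cache.
 */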
static u16 adapter_prepare(u16 adapter)
{
	struct hpi_message hm;
	struct hpi_response hr;

	/* Open the adapter and streams */
	u16 i;

	/* call to HPI_ADAPTER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
	if (hr.error)
		return hr.error;

	/* call to HPI_ADAPTER_GET_INFO */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_GET_INFO);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	if (hr.error)
		return hr.error;

	aDAPTER_INFO[adapter].num_outstreams = hr.u.ax.info.num_outstreams;
	aDAPTER_INFO[adapter].num_instreams = hr.u.ax.info.num_instreams;
	aDAPTER_INFO[adapter].type = hr.u.ax.info.adapter_type;

	/* call to HPI_OSTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_outstreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		outstream_user_open[adapter][i].open_flag = 0;
		outstream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_ISTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_instreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		instream_user_open[adapter][i].open_flag = 0;
		instream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_MIXER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_MIXER_OPEN[0]));

	return 0;
}

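/*
 * Initialise the cached open responses for one adapter, or for all adapters
 * when called with HPIMSGX_ALLADAPTERS, to "adapter not present" style
 * errors. adapter_prepare() overwrites these once a real adapter exists.
 */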
static void HPIMSGX__reset(u16 adapter_index)
{
	int i;
	u16 adapter;
	struct hpi_response hr;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		for (adapter = 0; adapter < HPI_MAX_ADAPTERS; adapter++) {

			hpi_init_response(&hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_OPEN, HPI_ERROR_BAD_ADAPTER);
			memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_ADAPTER_OPEN[adapter]));

			hpi_init_response(&hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN,
				HPI_ERROR_INVALID_OBJ);
			memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_MIXER_OPEN[adapter]));

			for (i = 0; i < HPI_MAX_STREAMS; i++) {
				hpi_init_response(&hr, HPI_OBJ_OSTREAM,
					HPI_OSTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_OSTREAM_OPEN[adapter]
						[i]));
				hpi_init_response(&hr, HPI_OBJ_ISTREAM,
					HPI_ISTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_ISTREAM_OPEN[adapter]
						[i]));
			}
		}
	} else if (adapter_index < HPI_MAX_ADAPTERS) {
		rESP_HPI_ADAPTER_OPEN[adapter_index].h.error =
			HPI_ERROR_BAD_ADAPTER;
		rESP_HPI_MIXER_OPEN[adapter_index].h.error =
			HPI_ERROR_INVALID_OBJ;
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			rESP_HPI_OSTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
			rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
		}
	}
}

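/*
 * Handle HPI_SUBSYS_CREATE_ADAPTER: look up the entry point function for the
 * new adapter's PCI IDs, pass the create message to it, and on success
 * register the entry point and pre-open the adapter via adapter_prepare().
 */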
static u16 HPIMSGX__init(struct hpi_message *phm,
	/* HPI_SUBSYS_CREATE_ADAPTER structure with */
	/* resource list or NULL=find all */
	struct hpi_response *phr
	/* response from HPI_ADAPTER_GET_INFO */
	)
{
	hpi_handler_func *entry_point_func;
	struct hpi_response hr;

	/* Init response here so we can pass in previous adapter list */
	hpi_init_response(&hr, phm->object, phm->function,
		HPI_ERROR_INVALID_OBJ);

	entry_point_func =
		hpi_lookup_entry_point_function(phm->u.s.resource.r.pci);

	if (entry_point_func) {
		HPI_DEBUG_MESSAGE(DEBUG, phm);
		entry_point_func(phm, &hr);
	} else {
		phr->error = HPI_ERROR_PROCESSING_MESSAGE;
		return phr->error;
	}
	if (hr.error == 0) {
		/* the adapter was created successfully
		   save the mapping for future use */
		hpi_entry_points[hr.u.s.adapter_index] = entry_point_func;
		/* prepare adapter (pre-open streams etc.) */
		HPI_DEBUG_LOG(DEBUG,
			"HPI_SUBSYS_CREATE_ADAPTER successful,"
			" preparing adapter\n");
		adapter_prepare(hr.u.s.adapter_index);
	}
	memcpy(phr, &hr, hr.size);
	return phr->error;
}

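/*
 * Release every stream still owned by h_owner on the given adapter (or on
 * all adapters for HPIMSGX_ALLADAPTERS): reset the stream, free any host
 * buffer, reset its group, and clear the ownership record.
 */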
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner)
{
	int i, adapter, adapter_limit;

	if (!h_owner)
		return;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		adapter = 0;
		adapter_limit = HPI_MAX_ADAPTERS;
	} else {
		adapter = adapter_index;
		adapter_limit = adapter + 1;
	}

	for (; adapter < adapter_limit; adapter++) {
		/*      printk(KERN_INFO "Cleanup adapter #%d\n",wAdapter); */
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			if (h_owner ==
				outstream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d ostream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				outstream_user_open[adapter][i].open_flag = 0;
				outstream_user_open[adapter][i].h_owner =
					NULL;
			}
			if (h_owner == instream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"Close adapter %d istream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				instream_user_open[adapter][i].open_flag = 0;
				instream_user_open[adapter][i].h_owner = NULL;
			}
		}
	}
}
794