nvme_private.h: diff between 547d523eb8a492442728976a98930c8bbf626260 (old) and 1e526bc478edf4fbcefa27a1285fa7bb624f3725 (new)
/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright

--- 109 unchanged lines hidden ---

extern int32_t nvme_retry_count;

struct nvme_completion_poll_status {

	struct nvme_completion	cpl;
	boolean_t		done;
};

+#define NVME_REQUEST_VADDR	1
+#define NVME_REQUEST_NULL	2	/* For requests with no payload. */
+#define NVME_REQUEST_UIO	3
+
struct nvme_request {

	struct nvme_command		cmd;
	struct nvme_qpair		*qpair;
-	void				*payload;
+	union {
+		void			*payload;
+		struct uio		*uio;
+	} u;
+	uint32_t			type;
	uint32_t			payload_size;
	boolean_t			timeout;
-	struct uio			*uio;
	nvme_cb_fn_t			cb_fn;
	void				*cb_arg;
	int32_t				retries;
	STAILQ_ENTRY(nvme_request)	stailq;
};
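
Note: this hunk is the heart of the change. The mutually exclusive payload
and uio pointers now share a union, and the new type field records which
member is live, so the request struct does not grow by a pointer. A minimal
sketch of the dispatch this enables in consuming code (example_map_payload
is a hypothetical name, not a driver function):

/*
 * Hypothetical sketch only: illustrates how code that consumes a
 * struct nvme_request can branch on req->type instead of guessing
 * which of the old payload/uio pointers was set.
 */
static void
example_map_payload(struct nvme_request *req)
{

	switch (req->type) {
	case NVME_REQUEST_VADDR:
		/* DMA-map req->u.payload for req->payload_size bytes. */
		break;
	case NVME_REQUEST_UIO:
		/* Walk the iovecs behind req->u.uio instead. */
		break;
	case NVME_REQUEST_NULL:
		/* No data phase; submit the command without mapping. */
		break;
	}
}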

struct nvme_async_event_request {

--- 335 unchanged lines hidden ---

		req->cb_fn = cb_fn;
		req->cb_arg = cb_arg;
		req->timeout = TRUE;
	}
	return (req);
}

static __inline struct nvme_request *
-nvme_allocate_request(void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
-    void *cb_arg)
+nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
+    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL) {
-		req->payload = payload;
+		req->type = NVME_REQUEST_VADDR;
+		req->u.payload = payload;
		req->payload_size = payload_size;
	}
	return (req);
}
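
Note: renaming nvme_allocate_request() to nvme_allocate_request_vaddr()
means every existing caller has to be updated. A hedged sketch of the new
call pattern (buf, len, done_cb and cb_arg are placeholders, not names from
this header):

/* Hypothetical call site showing the renamed allocator in use. */
static int
example_submit_vaddr(void *buf, uint32_t len, nvme_cb_fn_t done_cb,
    void *cb_arg)
{
	struct nvme_request *req;

	req = nvme_allocate_request_vaddr(buf, len, done_cb, cb_arg);
	if (req == NULL)
		return (ENOMEM);
	/* ... hand req to the qpair submission path ... */
	return (0);
}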

static __inline struct nvme_request *
-nvme_allocate_request_uio(struct uio *uio, nvme_cb_fn_t cb_fn, void *cb_arg)
+nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	req = _nvme_allocate_request(cb_fn, cb_arg);
	if (req != NULL)
-		req->uio = uio;
+		req->type = NVME_REQUEST_NULL;
	return (req);
}

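Note: nvme_allocate_request_null() is new; per the comment on
NVME_REQUEST_NULL above, it is for commands that carry no data buffer,
which previously had no clean allocator. A short sketch (done_cb and
cb_arg are placeholders defined elsewhere):

/* Hypothetical: allocate a request for a command with no data phase. */
req = nvme_allocate_request_null(done_cb, cb_arg);
if (req == NULL)
	return (ENOMEM);
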
+static __inline struct nvme_request *
+nvme_allocate_request_uio(struct uio *uio, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+	struct nvme_request *req;
+
+	req = _nvme_allocate_request(cb_fn, cb_arg);
+	if (req != NULL) {
+		req->type = NVME_REQUEST_UIO;
+		req->u.uio = uio;
+	}
+	return (req);
+}
+
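Note: the uio variant is re-added above, now tagging the request as
NVME_REQUEST_UIO and storing the pointer through the union; callers are
otherwise unaffected. A sketch (uio here stands in for whatever struct uio
the cdev read/write path hands the driver):

/* Hypothetical: wrap an incoming struct uio in a request. */
req = nvme_allocate_request_uio(uio, done_cb, cb_arg);
if (req == NULL)
	return (ENOMEM);
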
#define nvme_free_request(req)		uma_zfree(nvme_request_zone, req)

void	nvme_notify_async_consumers(struct nvme_controller *ctrlr,
	    const struct nvme_completion *async_cpl,
	    uint32_t log_page_id, void *log_page_buffer,
	    uint32_t log_page_size);
void	nvme_notify_fail_consumers(struct nvme_controller *ctrlr);

#endif /* __NVME_PRIVATE_H__ */