/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_SYS_ROOTNEX_H
#define	_SYS_ROOTNEX_H

/*
 * x86 root nexus implementation specific state
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/sunddi.h>
#include <sys/iommulib.h>

#ifdef __cplusplus
extern "C" {
#endif


/* size of buffer used for ctlop reportdev */
#define	REPORTDEV_BUFSIZE	1024

/* min and max interrupt vectors */
#define	VEC_MIN			1
#define	VEC_MAX			255

/* atomic increment/decrement to keep track of outstanding binds, etc */
#define	ROOTNEX_PROF_INC(addr)		atomic_inc_64(addr)
#define	ROOTNEX_PROF_DEC(addr)		atomic_add_64(addr, -1)

/* set in dmac_type to signify that this cookie uses the copy buffer */
#define	ROOTNEX_USES_COPYBUF		0x80000000

/*
 * integer or boolean property name and value. A few static rootnex properties
 * are created during rootnex attach from an array of rootnex_intprop_t.
 */
typedef struct rootnex_intprop_s {
	char	*prop_name;
	int	prop_value;
} rootnex_intprop_t;

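/*
 * Illustrative sketch, not part of this header: the static property table
 * mentioned above is simply an array of rootnex_intprop_t that is walked at
 * attach time. The entries shown here (standard page size macros) are
 * hypothetical examples, not necessarily the table the driver really uses.
 *
 *	static rootnex_intprop_t rootnex_intprp[] = {
 *		{ "PAGESIZE",		PAGESIZE },
 *		{ "MMU_PAGESIZE",	MMU_PAGESIZE },
 *	};
 *	#define	NROOTNEX_INTPROPS \
 *		(sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t))
 */
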
/*
 * sgl related information which is visible to rootnex_get_sgl(). Trying to
 * isolate get_sgl() as much as possible so it can be easily replaced.
 */
typedef struct rootnex_sglinfo_s {
	/*
	 * These are passed into rootnex_get_sgl().
	 *
	 * si_min_addr - the minimum physical address
	 * si_max_addr - the maximum physical address
	 * si_max_cookie_size - the maximum size of a physically contiguous
	 *    piece of memory that we can handle in a sgl.
	 * si_segmask - segment mask to determine if we cross a segment
	 *    boundary
	 * si_max_pages - max number of pages this sgl could occupy (which
	 *    is also the maximum number of cookies we might see).
	 */
	uint64_t	si_min_addr;
	uint64_t	si_max_addr;
	uint64_t	si_max_cookie_size;
	uint64_t	si_segmask;
	uint_t		si_max_pages;

	/*
	 * these are returned by rootnex_get_sgl()
	 *
	 * si_copybuf_req - amount of copy buffer needed by the buffer.
	 * si_buf_offset - The initial offset into the first page of the
	 *    buffer. It's set in get sgl and used in the bind slow path to
	 *    help calculate the current page index & offset from the current
	 *    offset which is relative to the start of the buffer.
	 * si_asp - address space of the buffer passed in.
	 * si_sgl_size - The actual number of cookies in the sgl. This does
	 *    not reflect any sharing that we might do on window boundaries.
	 */
	size_t		si_copybuf_req;
	off_t		si_buf_offset;
	struct as	*si_asp;
	uint_t		si_sgl_size;
} rootnex_sglinfo_t;

/*
 * When we have to use the copy buffer, we allocate one of these structures per
 * buffer page to track which pages need the copy buffer, what the kernel
 * virtual address is (which the device can't reach), and what the copy buffer
 * virtual address is (where the device dma's to/from). For 32-bit kernels,
 * since we can't use seg kpm, we also need to keep the page_t around and
 * track whether we've currently mapped the page into KVA space, both for
 * buffers which don't have a kva already and when we have multiple windows
 * because we used up all our copy buffer space.
 */
typedef struct rootnex_pgmap_s {
	boolean_t	pm_uses_copybuf;
#if !defined(__amd64)
	boolean_t	pm_mapped;
	page_t		*pm_pp;
	caddr_t		pm_vaddr;
#endif
	caddr_t		pm_kaddr;
	caddr_t		pm_cbaddr;
} rootnex_pgmap_t;

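/*
 * Illustrative sketch, not part of this header: during a dma sync, the
 * per-page map above tells us which pages are redirected through the copy
 * buffer and which addresses to copy between. A simplified walk (ignoring
 * partial pages at the ends of the buffer) might look like the following,
 * where "dma" is the handle's rootnex_dma_t (defined below), "pcnt" is the
 * number of pages in the window, and "to_device" is a placeholder for the
 * sync direction:
 *
 *	for (i = 0; i < pcnt; i++) {
 *		rootnex_pgmap_t *pmap = &dma->dp_pgmap[i];
 *
 *		if (!pmap->pm_uses_copybuf)
 *			continue;
 *		if (to_device)
 *			bcopy(pmap->pm_kaddr, pmap->pm_cbaddr, MMU_PAGESIZE);
 *		else
 *			bcopy(pmap->pm_cbaddr, pmap->pm_kaddr, MMU_PAGESIZE);
 *	}
 */
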
/*
 * We only need to trim a buffer when we have multiple windows. Each window has
 * trim state. We might have trimmed the end of the previous window, leaving
 * the first cookie of this window trimmed [tr_trim_first] (which basically
 * means we won't start with a new cookie), or we might need to trim the end of
 * the current window [tr_trim_last] (which basically means we won't end with a
 * complete cookie). We keep the same state for the first & last cookie in a
 * window (a window can have one or more cookies). However, when we trim the
 * last cookie, we keep a pointer to the last cookie in the trim state since we
 * only need this info when we trim. The pointer to the first cookie in the
 * window is in the window state since we need to know what the first cookie in
 * the window is in various places.
 *
 * If we do trim a cookie, we save away the physical address and size of the
 * cookie so that we can overwrite the cookie when we switch windows (the
 * space for a cookie which is in two windows is shared between the windows).
 * We keep around the same information for the last page in a window.
 *
 * If we happen to trim on a page that uses the copy buffer, and that page
 * is also in the middle of a window boundary because we have filled up the
 * copy buffer, we need to remember the copy buffer address for both windows
 * since the same page will have different copy buffer addresses in the two
 * windows. We need to do the same for kaddr in the 32-bit kernel since we
 * have a limited kva space which we map to.
 */
typedef struct rootnex_trim_s {
	boolean_t		tr_trim_first;
	boolean_t		tr_trim_last;
	ddi_dma_cookie_t	*tr_last_cookie;
	uint64_t		tr_first_paddr;
	uint64_t		tr_last_paddr;
	size_t			tr_first_size;
	size_t			tr_last_size;

	boolean_t		tr_first_copybuf_win;
	boolean_t		tr_last_copybuf_win;
	uint_t			tr_first_pidx;
	uint_t			tr_last_pidx;
	caddr_t			tr_first_cbaddr;
	caddr_t			tr_last_cbaddr;
#if !defined(__amd64)
	caddr_t			tr_first_kaddr;
	caddr_t			tr_last_kaddr;
#endif
} rootnex_trim_t;

/*
 * per window state. A bound DMA handle can have multiple windows. Each window
 * will have the following state. We track whether this window needs to sync,
 * the offset into the buffer where the window starts, the size of the window,
 * a pointer to the first cookie in the window, the number of cookies in the
 * window, and the trim state for the window. For the 32-bit kernel, we also
 * keep track of whether we need to remap the copy buffer when we switch to
 * this window.
 */
typedef struct rootnex_window_s {
	boolean_t		wd_dosync;
	uint_t			wd_cookie_cnt;
	off_t			wd_offset;
	size_t			wd_size;
	ddi_dma_cookie_t	*wd_first_cookie;
	rootnex_trim_t		wd_trim;
#if !defined(__amd64)
	boolean_t		wd_remap_copybuf;
#endif
} rootnex_window_t;

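/*
 * Illustrative sketch, not part of this header: each rootnex_window_t is
 * activated through the normal partial-mapping DDI interfaces. A driver
 * whose bind returned DDI_DMA_PARTIAL_MAP might walk the windows roughly as
 * follows; "hdl", "nwin", "off", "len", "cookie" and "ccount" are
 * placeholder locals, and the device programming step is elided:
 *
 *	(void) ddi_dma_numwin(hdl, &nwin);
 *	for (win = 0; win < nwin; win++) {
 *		if (ddi_dma_getwin(hdl, win, &off, &len, &cookie,
 *		    &ccount) != DDI_SUCCESS)
 *			break;
 *		...program the device with this window's ccount cookies...
 *	}
 */
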
/* per dma handle private state */
typedef struct rootnex_dma_s {
	/*
	 * sgl related state used to build and describe the sgl.
	 *
	 * dp_partial_required - used in the bind slow path to identify if we
	 *    need to do a partial mapping or not.
	 * dp_trim_required - used in the bind slow path to identify if we
	 *    need to trim when switching to a new window. This should only be
	 *    set when partial is set.
	 * dp_granularity_power_2 - set in alloc handle and used in the bind
	 *    slow path to determine if we & or % to calculate the trim.
	 * dp_dma - copy of the dma "object" passed in during bind.
	 * dp_maxxfer - trimmed dma_attr_maxxfer so that it is a whole
	 *    multiple of granularity.
	 * dp_sglinfo - See rootnex_sglinfo_t above.
	 */
	boolean_t		dp_partial_required;
	boolean_t		dp_trim_required;
	boolean_t		dp_granularity_power_2;
	uint64_t		dp_maxxfer;
	ddi_dma_obj_t		dp_dma;
	rootnex_sglinfo_t	dp_sglinfo;

	/*
	 * Copy buffer related state
	 *
	 * dp_copybuf_size - the actual size of the copy buffer that we are
	 *    using. This can be smaller than dp_copybuf_req, i.e. bind size >
	 *    max copy buffer size.
	 * dp_cbaddr - kernel address of the copy buffer. Used to determine
	 *    where to copy to/from.
	 * dp_cbsize - the "real" size returned from the copy buffer alloc.
	 *    Set in the copybuf alloc and used to free the copybuf.
	 * dp_pgmap - page map used in sync to determine which pages in the
	 *    buffer use the copy buffer and what addresses to use to copy
	 *    to/from.
	 * dp_cb_remaping - whether this bind causes us to have to remap
	 *    the copybuf when switching to new windows. This is only used in
	 *    the 32-bit kernel since we use seg kpm in the 64-bit kernel for
	 *    this case.
	 * dp_kva - kernel heap arena vmem space for mapping buffers for which
	 *    we don't have a kernel VA to bcopy to/from. This is only used in
	 *    the 32-bit kernel since we use seg kpm in the 64-bit kernel for
	 *    this case.
	 */
	size_t			dp_copybuf_size;
	caddr_t			dp_cbaddr;
	size_t			dp_cbsize;
	rootnex_pgmap_t		*dp_pgmap;
#if !defined(__amd64)
	boolean_t		dp_cb_remaping;
	caddr_t			dp_kva;
#endif

	/*
	 * window related state. dp_window points to the window state array,
	 * which may be a pointer into the pre allocated state, or we may have
	 * had to allocate the window array on the fly because it wouldn't
	 * fit. If we allocate it, we'll use dp_need_to_free_window and
	 * dp_window_size during cleanup. dp_current_win keeps track of the
	 * current window. dp_max_win is the maximum number of windows we
	 * could have.
	 */
	uint_t			dp_current_win;
	rootnex_window_t	*dp_window;
	boolean_t		dp_need_to_free_window;
	uint_t			dp_window_size;
	uint_t			dp_max_win;

	/* dip of driver which "owns" handle. set to rdip in alloc_handle() */
	dev_info_t		*dp_dip;

	/*
	 * dp_mutex and dp_inuse are only used to see if a driver is trying to
	 * bind to an already bound dma handle. dp_mutex is only used for
	 * dp_inuse.
	 */
	kmutex_t		dp_mutex;
	boolean_t		dp_inuse;

	/*
	 * cookie related state. The pointer to the cookies (dp_cookies) may
	 * be a pointer into the pre allocated state, or we may have had to
	 * allocate the cookie array on the fly because it wouldn't fit. If
	 * we allocate it, we'll use dp_need_to_free_cookie and dp_cookie_size
	 * during cleanup. dp_current_cookie is only used in the obsoleted
	 * interfaces to determine when we've used up all the cookies in a
	 * window during nextseg().
	 */
	size_t			dp_cookie_size;
	ddi_dma_cookie_t	*dp_cookies;
	boolean_t		dp_need_to_free_cookie;
	uint_t			dp_current_cookie; /* for obsoleted I/Fs */

	/*
	 * pre allocated space for the bind state, allocated during alloc
	 * handle. For a lot of devices, this will save us from having to do
	 * kmem_alloc's during the bind most of the time. kmem_alloc's can be
	 * expensive on x86 when the cpu count goes up since xcalls are
	 * expensive on x86.
	 */
	uchar_t			*dp_prealloc_buffer;

	/*
	 * intel iommu related state.
	 * dp_dvma_cookies saves the dvma cookies allocated for this handle;
	 * it has room for si_max_pages entries, is set up when the handle is
	 * bound, and is freed at unbind.
	 */
	void			*dp_dvma_cookies;
} rootnex_dma_t;

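/*
 * Illustrative sketch, not part of this header: dp_granularity_power_2
 * exists so the bind slow path can trim a window size down to a whole
 * multiple of the DMA attribute granularity with a cheap mask when the
 * granularity is a power of two, falling back to a modulo otherwise.
 * "size" and "granularity" are placeholders for the candidate window size
 * and the dma_attr_granular value:
 *
 *	if (dma->dp_granularity_power_2)
 *		trim = size & (granularity - 1);
 *	else
 *		trim = size % granularity;
 *	size -= trim;
 */
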
/*
 * profile/performance counters. Most things will be dtrace probes, but there
 * are a couple of things we want to keep track of all the time. We track the
 * total number of active handles and binds (i.e. an alloc without a free or
 * a bind without an unbind) since rootnex attach. We also track the total
 * number of binds which have failed since rootnex attach.
 */
typedef enum {
	ROOTNEX_CNT_ACTIVE_HDLS = 0,
	ROOTNEX_CNT_ACTIVE_BINDS = 1,
	ROOTNEX_CNT_ALLOC_FAIL = 2,
	ROOTNEX_CNT_BIND_FAIL = 3,
	ROOTNEX_CNT_SYNC_FAIL = 4,
	ROOTNEX_CNT_GETWIN_FAIL = 5,

	/* This one must be last */
	ROOTNEX_CNT_LAST
} rootnex_cnt_t;

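/*
 * Illustrative sketch, not part of this header: the counters live in the
 * r_counters array of the global state below, indexed by rootnex_cnt_t, and
 * are updated with the atomic macros defined earlier. For example, a
 * successful alloc_handle and the matching free_handle might do the
 * following, where "state" is a placeholder for the rootnex_state_t pointer:
 *
 *	ROOTNEX_PROF_INC(&state->r_counters[ROOTNEX_CNT_ACTIVE_HDLS]);
 *	...
 *	ROOTNEX_PROF_DEC(&state->r_counters[ROOTNEX_CNT_ACTIVE_HDLS]);
 */
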
/*
 * global driver state.
 *    r_dmahdl_cache - dma_handle kmem_cache
 *    r_dvma_call_list_id - ddi_set_callback() id
 *    r_peekpoke_mutex - serialize peeks and pokes.
 *    r_dip - rootnex dip
 *    r_reserved_msg_printed - ctlops reserve message threshold
 *    r_counters - profile/performance counters
 *    r_intel_iommu_enabled - intel iommu enabled
 */
typedef struct rootnex_state_s {
	uint_t			r_prealloc_cookies;
	uint_t			r_prealloc_size;
	kmem_cache_t		*r_dmahdl_cache;
	uintptr_t		r_dvma_call_list_id;
	kmutex_t		r_peekpoke_mutex;
	dev_info_t		*r_dip;
	ddi_iblock_cookie_t	r_err_ibc;
	boolean_t		r_reserved_msg_printed;
	uint64_t		r_counters[ROOTNEX_CNT_LAST];
	boolean_t		r_intel_iommu_enabled;
	iommulib_nexhandle_t	r_iommulib_handle;
} rootnex_state_t;


#ifdef __cplusplus
}
#endif

#endif	/* _SYS_ROOTNEX_H */